From 59ed0acb5658a1e7b2da683ff460208157ea0364 Mon Sep 17 00:00:00 2001 From: =?utf8?q?Arno=20T=C3=B6ll?= Date: Wed, 23 Jul 2014 15:43:00 +0000 Subject: [PATCH] Import trafficserver_5.0.1.orig.tar.bz2 [dgit import orig trafficserver_5.0.1.orig.tar.bz2] --- .clang-format | 25 + .gitmodules | 3 + .indent.pro | 29 + .vimrc | 7 + CHANGES | 3744 ++ CRUFT.txt | 49 + INSTALL | 88 + LAYOUT | 32 + LICENSE | 427 + Makefile.am | 120 + Makefile.in | 1146 + NOTICE | 63 + README | 283 + README-EC2 | 134 + REVIEWERS | 117 + STATUS | 65 + Vagrantfile | 142 + aclocal.m4 | 1428 + build/aux/ar-lib | 270 + build/aux/compile | 347 + build/aux/config.guess | 1535 + build/aux/config.sub | 1790 + build/aux/depcomp | 791 + build/aux/install-sh | 527 + build/aux/ltmain.sh | 9655 +++++ build/aux/missing | 215 + build/aux/test-driver | 127 + build/aux/ylwrap | 249 + build/ax_boost_base.m4 | 258 + build/ax_check_openssl.m4 | 136 + build/ax_compare_version.m4 | 177 + build/ax_compiler_vendor.m4 | 84 + build/ax_cxx_compile_stdcxx_11.m4 | 116 + build/ax_lib_curl.m4 | 38 + build/ax_lib_readline.m4 | 110 + build/ax_path_generic.m4 | 164 + build/ax_perl_modules.m4 | 51 + build/ax_with_curses.m4 | 518 + build/common.m4 | 585 + build/crypto.m4 | 123 + build/jemalloc.m4 | 78 + build/libtool.m4 | 7992 ++++ build/ltoptions.m4 | 384 + build/ltsugar.m4 | 123 + build/ltversion.m4 | 23 + build/lt~obsolete.m4 | 98 + build/lzma.m4 | 100 + build/network.m4 | 125 + build/pcre.m4 | 114 + build/pkg.m4 | 199 + build/plugins.mk | 37 + build/tcl.m4 | 3306 ++ build/tcmalloc.m4 | 54 + build/xml.m4 | 200 + build/zlib.m4 | 100 + cmd/Makefile.am | 18 + cmd/Makefile.in | 866 + cmd/traffic_cop/Makefile.am | 43 + cmd/traffic_cop/Makefile.in | 905 + cmd/traffic_cop/traffic_cop.cc | 1840 + cmd/traffic_line/Makefile.am | 36 + cmd/traffic_line/Makefile.in | 902 + cmd/traffic_line/traffic_line.cc | 376 + cmd/traffic_top/Makefile.am | 41 + cmd/traffic_top/Makefile.in | 922 + cmd/traffic_top/README | 4 + cmd/traffic_top/stats.h | 
456 + cmd/traffic_top/traffic_top.cc | 465 + config.layout | 279 + configure | 30876 ++++++++++++++++ configure.ac | 2000 + contrib/install_trafficserver.sh | 401 + contrib/make-ssl-multicert | 47 + contrib/manifests/debian.pp | 43 + contrib/manifests/omnios.sh | 38 + contrib/manifests/redhat.pp | 61 + contrib/python/compare_RecordsConfigcc.py | 119 + contrib/python/compare_records_config.py | 67 + contrib/set_trafficserver.sh | 100 + doc/Doxyfile.in | 1283 + doc/Makefile.am | 147 + doc/Makefile.in | 1048 + doc/admin/cluster-howto.en.rst | 168 + doc/admin/configuring-cache.en.rst | 389 + doc/admin/configuring-traffic-server.en.rst | 83 + doc/admin/event-logging-formats.en.rst | 413 + doc/admin/explicit-proxy-caching.en.rst | 94 + doc/admin/faqs.en.rst | 392 + doc/admin/forward-proxy.en.rst | 80 + doc/admin/getting-started.en.rst | 189 + doc/admin/hierachical-caching.en.rst | 180 + doc/admin/http-proxy-caching.en.rst | 846 + doc/admin/index.en.rst | 319 + doc/admin/monitoring-traffic.en.rst | 104 + doc/admin/performance-tuning.en.rst | 54 + doc/admin/reverse-proxy-http-redirects.en.rst | 317 + doc/admin/security-options.en.rst | 228 + doc/admin/session-protocol.en.rst | 55 + .../traffic-server-error-messages.en.rst | 373 + doc/admin/transparent-proxy.en.rst | 113 + doc/admin/transparent-proxy/bridge.en.rst | 177 + doc/admin/transparent-proxy/build.en.rst | 60 + .../transparent-proxy/router-inline.en.rst | 91 + .../wccp-configuration.en.rst | 155 + doc/admin/working-log-files.en.rst | 1157 + doc/arch/cache/cache-api.en.rst | 42 + doc/arch/cache/cache-appendix.en.rst | 99 + doc/arch/cache/cache-arch.en.rst | 854 + doc/arch/cache/cache-data-structures.en.rst | 164 + doc/arch/cache/cache.en.rst | 33 + doc/arch/cache/common.defs | 48 + .../images/ats-cache-volume-definition.png | Bin 0 -> 3086 bytes .../images/ats-cache-volume-directory.png | Bin 0 -> 34949 bytes .../cache/images/ats-cache-volume-layout.png | Bin 0 -> 8471 bytes 
.../cache/images/ats-cache-write-cursor.png | Bin 0 -> 6593 bytes .../images/cache-directory-structure.png | Bin 0 -> 28553 bytes .../cache/images/cache-doc-layout-3-2-0.png | Bin 0 -> 7357 bytes .../cache/images/cache-doc-layout-4-0-1.png | Bin 0 -> 8524 bytes .../cache/images/cache-multi-fragment.png | Bin 0 -> 47782 bytes doc/arch/cache/images/cache-span-layout.png | Bin 0 -> 8533 bytes doc/arch/cache/images/cache-spans.png | Bin 0 -> 5085 bytes doc/arch/cache/images/cache-stripe-layout.png | Bin 0 -> 11594 bytes doc/arch/cache/images/dir-bucket-assign.png | Bin 0 -> 8815 bytes doc/arch/cache/images/dir-segment-bucket.png | Bin 0 -> 5720 bytes doc/arch/cache/ram-cache.en.rst | 88 + doc/arch/cache/tier-storage.en.rst | 118 + doc/arch/hacking/config-var-impl.en.rst | 283 + doc/arch/hacking/index.en.rst | 30 + doc/arch/index.en.rst | 37 + doc/arch/proposals/hostdb.en.rst | 168 + doc/checkvers.py | 41 + doc/conf.py | 381 + doc/doap.rdf | 64 + doc/dot/ResponseDiag.dot | 70 + doc/dot/SimpleStateDiag.dot | 65 + doc/dot/SimpleStateDiagAPI.dot | 86 + doc/ext/traffic-server.py | 238 + doc/glossary.en.rst | 101 + doc/index.rst | 53 + .../ja/LC_MESSAGES/admin/cluster-howto.en.po | 308 + .../LC_MESSAGES/admin/configuring-cache.en.po | 593 + .../admin/configuring-traffic-server.en.po | 125 + .../admin/event-logging-formats.en.po | 1022 + .../admin/explicit-proxy-caching.en.po | 123 + doc/locale/ja/LC_MESSAGES/admin/faqs.en.po | 760 + .../ja/LC_MESSAGES/admin/forward-proxy.en.po | 154 + .../LC_MESSAGES/admin/getting-started.en.po | 348 + .../admin/hierachical-caching.en.po | 184 + .../admin/http-proxy-caching.en.po | 1885 + doc/locale/ja/LC_MESSAGES/admin/index.en.po | 643 + .../admin/monitoring-traffic.en.po | 157 + .../admin/performance-tuning.en.po | 72 + .../admin/reverse-proxy-http-redirects.en.po | 626 + .../LC_MESSAGES/admin/security-options.en.po | 493 + .../admin/traffic-server-error-messages.en.po | 872 + .../LC_MESSAGES/admin/transparent-proxy.en.po | 218 + 
.../admin/transparent-proxy/bridge.en.po | 298 + .../admin/transparent-proxy/build.en.po | 111 + .../transparent-proxy/router-inline.en.po | 162 + .../wccp-configuration.en.po | 300 + .../LC_MESSAGES/admin/working-log-files.en.po | 1457 + .../admin/working-log-files/log-formats.en.po | 545 + .../ja/LC_MESSAGES/arch/cache/cache-api.en.po | 63 + .../arch/cache/cache-appendix.en.po | 253 + .../LC_MESSAGES/arch/cache/cache-arch.en.po | 2106 ++ .../arch/cache/cache-data-structures.en.po | 288 + .../ja/LC_MESSAGES/arch/cache/cache.en.po | 35 + .../ja/LC_MESSAGES/arch/cache/ram-cache.en.po | 271 + .../LC_MESSAGES/arch/cache/tier-storage.en.po | 224 + .../ja/LC_MESSAGES/arch/hacking/index.en.po | 42 + doc/locale/ja/LC_MESSAGES/arch/index.en.po | 62 + .../LC_MESSAGES/arch/proposals/hostdb.en.po | 372 + doc/locale/ja/LC_MESSAGES/glossary.en.po | 284 + doc/locale/ja/LC_MESSAGES/index.po | 73 + .../ja/LC_MESSAGES/reference/api/TSAPI.en.po | 320 + .../LC_MESSAGES/reference/api/TSDebug.en.po | 117 + .../reference/api/TSHttpHookAdd.en.po | 132 + .../api/TSHttpOverridableConfig.en.po | 108 + .../reference/api/TSHttpParserCreate.en.po | 122 + .../reference/api/TSHttpTxnMilestoneGet.en.po | 259 + .../reference/api/TSIOBufferCreate.en.po | 123 + .../reference/api/TSInstallDirGet.en.po | 84 + .../reference/api/TSLifecycleHookAdd.en.po | 192 + .../reference/api/TSMBufferCreate.en.po | 119 + .../api/TSMimeHdrFieldValueStringGet.en.po | 132 + .../reference/api/TSPluginInit.en.po | 92 + .../LC_MESSAGES/reference/api/TSRemap.en.po | 145 + .../api/TSTrafficServerVersionGet.en.po | 68 + .../LC_MESSAGES/reference/api/TSTypes.en.po | 121 + .../reference/api/TSUrlCreate.en.po | 113 + .../reference/api/TSUrlHostGet.en.po | 100 + .../reference/api/TSUrlHostSet.en.po | 92 + .../reference/api/TSUrlPercentEncode.en.po | 91 + .../reference/api/TSUrlStringGet.en.po | 88 + .../LC_MESSAGES/reference/api/TSmalloc.en.po | 137 + .../ja/LC_MESSAGES/reference/api/index.en.po | 31 + 
.../reference/commands/index.en.po | 31 + .../reference/commands/traffic_cop.en.po | 84 + .../reference/commands/traffic_line.en.po | 217 + .../reference/commands/traffic_logcat.en.po | 131 + .../reference/commands/traffic_logstats.en.po | 43 + .../reference/commands/traffic_manager.en.po | 39 + .../reference/commands/traffic_server.en.po | 47 + .../reference/commands/traffic_shell.en.po | 39 + .../reference/commands/traffic_top.en.po | 35 + .../reference/commands/tspush.en.po | 39 + .../reference/commands/tstop.en.po | 35 + .../LC_MESSAGES/reference/commands/tsxs.en.po | 39 + .../configuration/cache.config.en.po | 309 + .../configuration/congestion.config.en.po | 381 + .../configuration/hosting.config.en.po | 125 + .../reference/configuration/icp.config.en.po | 178 + .../reference/configuration/index.en.po | 31 + .../configuration/ip_allow.config.en.po | 94 + .../configuration/log_hosts.config.en.po | 83 + .../configuration/logs_xml.config.en.po | 527 + .../configuration/parent.config.en.po | 274 + .../configuration/plugin.config.en.po | 88 + .../configuration/records.config.en.po | 3488 ++ .../configuration/remap.config.en.po | 732 + .../configuration/splitdns.config.en.po | 196 + .../configuration/ssl_multicert.config.en.po | 291 + .../configuration/storage.config.en.po | 255 + .../configuration/update.config.en.po | 438 + .../configuration/volume.config.en.po | 112 + .../reference/plugins/authproxy.en.po | 124 + .../reference/plugins/balancer.en.po | 117 + .../reference/plugins/buffer_upload.en.po | 132 + .../reference/plugins/cacheurl.en.po | 135 + .../reference/plugins/combo_handler.en.po | 108 + .../reference/plugins/conf_remap.en.po | 83 + .../LC_MESSAGES/reference/plugins/esi.en.po | 35 + .../reference/plugins/geoip_acl.en.po | 116 + .../LC_MESSAGES/reference/plugins/gzip.en.po | 156 + .../reference/plugins/header_filter.en.po | 151 + .../reference/plugins/header_rewrite.en.po | 176 + .../LC_MESSAGES/reference/plugins/hipes.en.po | 212 + 
.../LC_MESSAGES/reference/plugins/index.en.po | 86 + .../reference/plugins/metalink.en.po | 163 + .../reference/plugins/mysql_remap.en.po | 122 + .../reference/plugins/regex_remap.en.po | 153 + .../plugins/stale_while_revalidate.en.po | 35 + .../reference/plugins/stats_over_http.en.po | 66 + .../reference/plugins/ts_lua.en.po | 470 + .../reference/plugins/xdebug.en.po | 79 + .../ja/LC_MESSAGES/sdk/actions-guide.en.po | 119 + .../sdk/actions-guide/hosts-lookup-api.en.po | 53 + .../LC_MESSAGES/sdk/adding-statistics.en.po | 87 + .../coupled-statistics.en.po | 130 + ...iewing-statistics-using-traffic-line.en.po | 50 + .../ja/LC_MESSAGES/sdk/continuations.en.po | 176 + .../how-to-activate-continuations.en.po | 56 + .../writing-handler-functions.en.po | 403 + .../ja/LC_MESSAGES/sdk/getting-started.en.po | 335 + .../sdk/getting-started/a-simple-plugin.en.po | 150 + .../getting-started/naming-conventions.en.po | 83 + ...in-registration-and-version-checking.en.po | 59 + .../sdk/header-based-plugin-examples.en.po | 108 + .../basic-authorization-plugin.en.po | 56 + ...-getting-a-handle-to-the-transaction.en.po | 37 + .../setting-a-transaction-hook.en.po | 75 + .../working-with-http-headers.en.po | 48 + .../blacklist-plugin.en.po | 97 + ...sing-the-transaction-being-processed.en.po | 68 + .../setting-a-global-hook.en.po | 53 + .../setting-up-a-transaction-hook.en.po | 88 + .../working-with-http-header-functions.en.po | 48 + .../how-to-create-trafficserver-plugins.en.po | 307 + .../roadmap-for-creating-plugins.en.po | 145 + .../ja/LC_MESSAGES/sdk/http-headers.en.po | 174 + ...-to-trafficserver-http-header-system.en.po | 62 + ...licate-mime-fields-are-not-coalesced.en.po | 49 + ...-belong-to-an-associated-mime-header.en.po | 71 + .../release-marshal-buffer-handles.en.po | 91 + .../sdk/http-headers/http-headers.en.po | 483 + .../sdk/http-headers/marshal-buffers.en.po | 115 + .../sdk/http-headers/mime-headers.en.po | 948 + .../LC_MESSAGES/sdk/http-headers/urls.en.po | 229 + 
.../sdk/http-hooks-and-transactions.en.po | 104 + .../adding-hooks.en.po | 280 + .../http-alternate-selection.en.po | 152 + .../http-sessions.en.po | 90 + .../http-transactions.en.po | 185 + .../initiate-http-connection.en.po | 43 + .../intercepting-http-transactions.en.po | 47 + .../sdk/http-transformation-plugin.en.po | 199 + .../append-transform-plugin.en.po | 223 + ...-buffered-null-transformation-plugin.en.po | 206 + .../sample-null-transformation-plugin.en.po | 190 + doc/locale/ja/LC_MESSAGES/sdk/index.en.po | 70 + doc/locale/ja/LC_MESSAGES/sdk/io-guide.en.po | 299 + .../sdk/io-guide/guide-to-cache-api.en.po | 78 + .../io-guide/guide-to-cache-api/errors.en.po | 51 + .../io-guide/guide-to-cache-api/example.en.po | 85 + .../how-to-do-a-cache-remove.en.po | 50 + .../how-to-do-a-cache-write.en.po | 53 + .../LC_MESSAGES/sdk/io-guide/io-buffers.en.po | 76 + .../sdk/io-guide/net-vconnections.en.po | 52 + .../sdk/io-guide/transformations.en.po | 222 + .../ja/LC_MESSAGES/sdk/io-guide/vios.en.po | 120 + .../sdk/misc-interface-guide.en.po | 87 + .../memory-allocation.en.po | 83 + .../thread-functions.en.po | 69 + .../misc-interface-guide/tsfopen-family.en.po | 94 + .../ja/LC_MESSAGES/sdk/mutex-guide.en.po | 321 + .../sdk/new-protocol-plugins.en.po | 540 + .../sdk/plugin-configurations.en.po | 106 + .../LC_MESSAGES/sdk/plugin-management.en.po | 35 + .../guide-to-the-logging-api.en.po | 157 + ...rafficserver-settings-and-statistics.en.po | 94 + doc/locale/ja/LC_MESSAGES/sdk/preface.en.po | 58 + .../sdk/preface/how-to-use-this-book.en.po | 218 + .../preface/typographical-conventions.en.po | 69 + .../ja/LC_MESSAGES/sdk/remap-plugin.en.po | 111 + .../remap-plugin/example-query-remap.en.po | 107 + .../LC_MESSAGES/sdk/sample-source-code.en.po | 75 + .../sdk/troubleshooting-tips.en.po | 62 + .../debugging-memory-leaks.en.po | 45 + .../unable-to-debug-tags.en.po | 159 + .../unable-to-load-plugins.en.po | 65 + .../using-a-debugger.en.po | 55 + doc/mainpage.doc | 16 + 
doc/man/man_page_template.txt | 67 + doc/man/show_alarms.1 | 33 + doc/man/show_cache-stats.1 | 60 + doc/man/show_cache.1 | 70 + doc/man/show_cluster.1 | 35 + doc/man/show_dns-resolver.1 | 37 + doc/man/show_dns-stats.1 | 34 + doc/man/show_hostdb-stats.1 | 35 + doc/man/show_hostdb.1 | 40 + doc/man/show_http-stats.1 | 43 + doc/man/show_http-trans-stats.1 | 55 + doc/man/show_http.1 | 45 + doc/man/show_icp-stats.1 | 46 + doc/man/show_icp.1 | 73 + doc/man/show_logging-stats.1 | 39 + doc/man/show_logging.1 | 69 + doc/man/show_network.1 | 51 + doc/man/show_parent.1 | 50 + doc/man/show_port-tunnels.1 | 37 + doc/man/show_proxy-stats.1 | 42 + doc/man/show_proxy.1 | 32 + doc/man/show_remap.1 | 36 + doc/man/show_scheduled-update.1 | 53 + doc/man/show_security.1 | 37 + doc/man/show_socks.1 | 47 + doc/man/show_ssl.1 | 33 + doc/man/show_status.1 | 33 + doc/man/show_version.1 | 33 + doc/man/show_virtual-ip.1 | 35 + doc/manpages.py | 67 + doc/pip_requirements.txt | 2 + doc/reference/api/TSAPI.en.rst | 145 + doc/reference/api/TSActionCancel.en.rst | 30 + doc/reference/api/TSActionDone.en.rst | 30 + doc/reference/api/TSCacheRead.en.rst | 46 + doc/reference/api/TSCacheRemove.en.rst | 43 + doc/reference/api/TSCacheWrite.en.rst | 52 + doc/reference/api/TSConfigDataGet.en.rst | 30 + doc/reference/api/TSConfigGet.en.rst | 30 + doc/reference/api/TSConfigRelease.en.rst | 30 + doc/reference/api/TSConfigSet.en.rst | 30 + doc/reference/api/TSContCall.en.rst | 30 + doc/reference/api/TSContCreate.en.rst | 30 + doc/reference/api/TSContDataGet.en.rst | 30 + doc/reference/api/TSContDataSet.en.rst | 30 + doc/reference/api/TSContDestroy.en.rst | 30 + doc/reference/api/TSContMutexGet.en.rst | 30 + doc/reference/api/TSContSchedule.en.rst | 30 + doc/reference/api/TSDebug.en.rst | 93 + doc/reference/api/TSHostLookup.en.rst | 30 + .../api/TSHostLookupResultAddrGet.en.rst | 30 + doc/reference/api/TSHttpConnect.en.rst | 42 + doc/reference/api/TSHttpHdrClone.en.rst | 30 + 
doc/reference/api/TSHttpHdrCopy.en.rst | 45 + doc/reference/api/TSHttpHdrCreate.en.rst | 30 + doc/reference/api/TSHttpHdrDestroy.en.rst | 37 + doc/reference/api/TSHttpHdrLengthGet.en.rst | 30 + doc/reference/api/TSHttpHdrMethodGet.en.rst | 30 + doc/reference/api/TSHttpHdrMethodSet.en.rst | 30 + doc/reference/api/TSHttpHdrPrint.en.rst | 30 + doc/reference/api/TSHttpHdrReasonGet.en.rst | 30 + .../api/TSHttpHdrReasonLookup.en.rst | 30 + doc/reference/api/TSHttpHdrReasonSet.en.rst | 30 + doc/reference/api/TSHttpHdrStatusGet.en.rst | 30 + doc/reference/api/TSHttpHdrStatusSet.en.rst | 30 + doc/reference/api/TSHttpHdrTypeGet.en.rst | 30 + doc/reference/api/TSHttpHdrTypeSet.en.rst | 30 + doc/reference/api/TSHttpHdrUrlGet.en.rst | 30 + doc/reference/api/TSHttpHdrUrlSet.en.rst | 30 + doc/reference/api/TSHttpHdrVersionGet.en.rst | 30 + doc/reference/api/TSHttpHdrVersionSet.en.rst | 30 + doc/reference/api/TSHttpHookAdd.en.rst | 113 + .../api/TSHttpIsInternalRequest.en.rst | 59 + .../api/TSHttpOverridableConfig.en.rst | 155 + doc/reference/api/TSHttpParserCreate.en.rst | 89 + doc/reference/api/TSHttpSsnReenable.en.rst | 30 + .../api/TSHttpTxnCacheLookupStatusGet.en.rst | 30 + .../api/TSHttpTxnCacheLookupUrlGet.en.rst | 30 + .../api/TSHttpTxnCachedReqGet.en.rst | 30 + .../api/TSHttpTxnCachedRespGet.en.rst | 30 + .../api/TSHttpTxnClientReqGet.en.rst | 30 + .../api/TSHttpTxnClientRespGet.en.rst | 30 + .../api/TSHttpTxnErrorBodySet.en.rst | 39 + doc/reference/api/TSHttpTxnIntercept.en.rst | 50 + .../api/TSHttpTxnMilestoneGet.en.rst | 85 + .../api/TSHttpTxnNextHopAddrGet.en.rst | 39 + .../api/TSHttpTxnParentProxySet.en.rst | 37 + doc/reference/api/TSHttpTxnReenable.en.rst | 42 + .../api/TSHttpTxnServerAddrGet.en.rst | 39 + .../api/TSHttpTxnServerIntercept.en.rst | 69 + .../api/TSHttpTxnServerReqGet.en.rst | 30 + .../api/TSHttpTxnServerRespGet.en.rst | 30 + doc/reference/api/TSHttpTxnSsnGet.en.rst | 30 + .../api/TSHttpTxnTransformRespGet.en.rst | 30 + 
.../api/TSHttpTxnTransformedRespCache.en.rst | 30 + .../TSHttpTxnUntransformedRespCache.en.rst | 30 + .../api/TSIOBufferBlockReadStart.en.rst | 30 + doc/reference/api/TSIOBufferCopy.en.rst | 30 + doc/reference/api/TSIOBufferCreate.en.rst | 90 + doc/reference/api/TSInstallDirGet.en.rst | 61 + doc/reference/api/TSLifecycleHookAdd.en.rst | 109 + doc/reference/api/TSMBufferCreate.en.rst | 97 + doc/reference/api/TSMgmtCounterGet.en.rst | 30 + doc/reference/api/TSMgmtFloatGet.en.rst | 30 + doc/reference/api/TSMgmtIntGet.en.rst | 30 + doc/reference/api/TSMgmtStringGet.en.rst | 30 + doc/reference/api/TSMgmtUpdateRegister.en.rst | 30 + doc/reference/api/TSMimeHdrClone.en.rst | 39 + doc/reference/api/TSMimeHdrCopy.en.rst | 42 + doc/reference/api/TSMimeHdrCreate.en.rst | 35 + doc/reference/api/TSMimeHdrDestroy.en.rst | 33 + doc/reference/api/TSMimeHdrFieldAppend.en.rst | 43 + doc/reference/api/TSMimeHdrFieldClone.en.rst | 30 + doc/reference/api/TSMimeHdrFieldCopy.en.rst | 30 + .../api/TSMimeHdrFieldCopyValues.en.rst | 30 + doc/reference/api/TSMimeHdrFieldCreate.en.rst | 30 + .../api/TSMimeHdrFieldDestroy.en.rst | 36 + doc/reference/api/TSMimeHdrFieldFind.en.rst | 41 + doc/reference/api/TSMimeHdrFieldGet.en.rst | 40 + .../api/TSMimeHdrFieldLengthGet.en.rst | 30 + .../api/TSMimeHdrFieldNameGet.en.rst | 30 + .../api/TSMimeHdrFieldNameSet.en.rst | 30 + doc/reference/api/TSMimeHdrFieldNext.en.rst | 30 + .../api/TSMimeHdrFieldNextDup.en.rst | 30 + doc/reference/api/TSMimeHdrFieldRemove.en.rst | 46 + .../api/TSMimeHdrFieldValueAppend.en.rst | 30 + .../api/TSMimeHdrFieldValueDateInsert.en.rst | 30 + .../api/TSMimeHdrFieldValueDateSet.en.rst | 30 + .../api/TSMimeHdrFieldValueIntSet.en.rst | 30 + .../api/TSMimeHdrFieldValueStringGet.en.rst | 113 + .../TSMimeHdrFieldValueStringInsert.en.rst | 30 + .../api/TSMimeHdrFieldValueStringSet.en.rst | 30 + .../api/TSMimeHdrFieldValueUintInsert.en.rst | 30 + .../api/TSMimeHdrFieldValueUintSet.en.rst | 30 + .../api/TSMimeHdrFieldValuesClear.en.rst 
| 30 + .../api/TSMimeHdrFieldValuesCount.en.rst | 30 + doc/reference/api/TSMimeHdrFieldsClear.en.rst | 34 + doc/reference/api/TSMimeHdrFieldsCount.en.rst | 34 + doc/reference/api/TSMimeHdrLengthGet.en.rst | 36 + doc/reference/api/TSMimeHdrParse.en.rst | 39 + doc/reference/api/TSMimeHdrPrint.en.rst | 34 + doc/reference/api/TSMimeParserClear.en.rst | 33 + doc/reference/api/TSMimeParserCreate.en.rst | 38 + doc/reference/api/TSMimeParserDestroy.en.rst | 33 + doc/reference/api/TSMutexCreate.en.rst | 30 + doc/reference/api/TSMutexLock.en.rst | 30 + doc/reference/api/TSMutexLockTry.en.rst | 30 + doc/reference/api/TSMutexUnlock.en.rst | 30 + doc/reference/api/TSNetAccept.en.rst | 30 + .../api/TSNetAcceptNamedProtocol.en.rst | 54 + doc/reference/api/TSNetConnect.en.rst | 30 + doc/reference/api/TSPluginInit.en.rst | 77 + doc/reference/api/TSRemap.en.rst | 87 + .../api/TSTextLogObjectCreate.en.rst | 54 + doc/reference/api/TSThreadCreate.en.rst | 30 + doc/reference/api/TSThreadDestroy.en.rst | 30 + doc/reference/api/TSThreadInit.en.rst | 30 + doc/reference/api/TSThreadSelf.en.rst | 30 + .../api/TSTrafficServerVersionGet.en.rst | 100 + doc/reference/api/TSTransformCreate.en.rst | 30 + .../api/TSTransformOutputVConnGet.en.rst | 30 + doc/reference/api/TSTypes.en.rst | 139 + doc/reference/api/TSUrlCreate.en.rst | 87 + doc/reference/api/TSUrlDestroy.en.rst | 43 + doc/reference/api/TSUrlFtpTypeGet.en.rst | 33 + doc/reference/api/TSUrlFtpTypeSet.en.rst | 34 + doc/reference/api/TSUrlHostGet.en.rst | 81 + doc/reference/api/TSUrlHostSet.en.rst | 76 + doc/reference/api/TSUrlPercentEncode.en.rst | 69 + doc/reference/api/TSUrlStringGet.en.rst | 69 + doc/reference/api/TSVConnAbort.en.rst | 30 + .../api/TSVConnCacheObjectSizeGet.en.rst | 30 + doc/reference/api/TSVConnClose.en.rst | 30 + doc/reference/api/TSVConnClosedGet.en.rst | 30 + doc/reference/api/TSVConnFdCreate.en.rst | 63 + doc/reference/api/TSVConnRead.en.rst | 30 + doc/reference/api/TSVConnReadVIOGet.en.rst | 30 + 
doc/reference/api/TSVConnShutdown.en.rst | 30 + doc/reference/api/TSVConnWrite.en.rst | 30 + doc/reference/api/TSVConnWriteVIOGet.en.rst | 30 + doc/reference/api/TSVIOBufferGet.en.rst | 30 + doc/reference/api/TSVIOContGet.en.rst | 30 + doc/reference/api/TSVIOMutexGet.en.rst | 30 + doc/reference/api/TSVIONBytesGet.en.rst | 30 + doc/reference/api/TSVIONBytesSet.en.rst | 30 + doc/reference/api/TSVIONDoneGet.en.rst | 30 + doc/reference/api/TSVIONDoneSet.en.rst | 30 + doc/reference/api/TSVIONTodoGet.en.rst | 30 + doc/reference/api/TSVIOReaderGet.en.rst | 30 + doc/reference/api/TSVIOReenable.en.rst | 30 + doc/reference/api/TSVIOVConnGet.en.rst | 30 + doc/reference/api/TSfclose.en.rst | 36 + doc/reference/api/TSfflush.en.rst | 34 + doc/reference/api/TSfgets.en.rst | 38 + doc/reference/api/TSfopen.en.rst | 37 + doc/reference/api/TSfread.en.rst | 34 + doc/reference/api/TSfwrite.en.rst | 39 + doc/reference/api/TSmalloc.en.rst | 81 + doc/reference/api/index.en.rst | 25 + doc/reference/commands/index.en.rst | 33 + doc/reference/commands/traffic_cop.en.rst | 68 + doc/reference/commands/traffic_line.en.rst | 260 + doc/reference/commands/traffic_logcat.en.rst | 106 + .../commands/traffic_logstats.en.rst | 68 + doc/reference/commands/traffic_manager.en.rst | 51 + doc/reference/commands/traffic_server.en.rst | 82 + doc/reference/commands/traffic_shell.en.rst | 29 + doc/reference/commands/traffic_top.en.rst | 27 + doc/reference/commands/tspush.en.rst | 36 + doc/reference/commands/tsxs.en.rst | 49 + .../configuration/cache.config.en.rst | 196 + .../configuration/congestion.config.en.rst | 198 + .../configuration/hosting.config.en.rst | 99 + doc/reference/configuration/icp.config.en.rst | 99 + doc/reference/configuration/index.en.rst | 43 + .../configuration/ip_allow.config.en.rst | 80 + .../configuration/log_hosts.config.en.rst | 66 + .../configuration/logs_xml.config.en.rst | 391 + .../configuration/parent.config.en.rst | 165 + .../configuration/plugin.config.en.rst | 75 + 
.../configuration/records.config.en.rst | 2404 ++ .../configuration/remap.config.en.rst | 479 + .../configuration/splitdns.config.en.rst | 129 + .../configuration/ssl_multicert.config.en.rst | 191 + .../configuration/storage.config.en.rst | 146 + .../configuration/update.config.en.rst | 213 + .../configuration/volume.config.en.rst | 69 + doc/reference/plugins/authproxy.en.rst | 91 + doc/reference/plugins/background_fetch.en.rst | 73 + doc/reference/plugins/balancer.en.rst | 75 + doc/reference/plugins/buffer_upload.en.rst | 82 + doc/reference/plugins/cacheurl.en.rst | 78 + doc/reference/plugins/combo_handler.en.rst | 85 + doc/reference/plugins/conf_remap.en.rst | 51 + doc/reference/plugins/esi.en.rst | 152 + doc/reference/plugins/geoip_acl.en.rst | 96 + doc/reference/plugins/gzip.en.rst | 92 + doc/reference/plugins/header_rewrite.en.rst | 186 + doc/reference/plugins/hipes.en.rst | 126 + doc/reference/plugins/index.en.rst | 80 + doc/reference/plugins/metalink.en.rst | 125 + doc/reference/plugins/mysql_remap.en.rst | 85 + doc/reference/plugins/regex_remap.en.rst | 142 + doc/reference/plugins/s3_auth.en.rst | 84 + .../plugins/stale_while_revalidate.en.rst | 23 + doc/reference/plugins/stats_over_http.en.rst | 52 + doc/reference/plugins/tcpinfo.en.rst | 122 + doc/reference/plugins/ts_lua.en.rst | 2084 ++ doc/reference/plugins/xdebug.en.rst | 47 + doc/sbuild | 27 + doc/sdk/actions-guide.en.rst | 178 + doc/sdk/actions-guide/hosts-lookup-api.en.rst | 28 + doc/sdk/adding-statistics.en.rst | 70 + .../coupled-statistics.en.rst | 117 + ...ewing-statistics-using-traffic-line.en.rst | 35 + doc/sdk/continuations.en.rst | 131 + .../how-to-activate-continuations.en.rst | 36 + .../writing-handler-functions.en.rst | 125 + doc/sdk/getting-started.en.rst | 244 + .../getting-started/a-simple-plugin.en.rst | 122 + .../getting-started/naming-conventions.en.rst | 51 + ...n-registration-and-version-checking.en.rst | 85 + doc/sdk/header-based-plugin-examples.en.rst | 70 + 
.../basic-authorization-plugin.en.rst | 44 + ...getting-a-handle-to-the-transaction.en.rst | 44 + .../setting-a-transaction-hook.en.rst | 55 + .../working-with-http-headers.en.rst | 96 + .../blacklist-plugin.en.rst | 106 + ...ing-the-transaction-being-processed.en.rst | 60 + .../setting-a-global-hook.en.rst | 36 + .../setting-up-a-transaction-hook.en.rst | 80 + .../working-with-http-header-functions.en.rst | 61 + ...how-to-create-trafficserver-plugins.en.rst | 241 + .../roadmap-for-creating-plugins.en.rst | 86 + doc/sdk/http-headers.en.rst | 140 + ...to-trafficserver-http-header-system.en.rst | 58 + ...icate-mime-fields-are-not-coalesced.en.rst | 31 + ...belong-to-an-associated-mime-header.en.rst | 67 + .../release-marshal-buffer-handles.en.rst | 79 + doc/sdk/http-headers/http-headers.en.rst | 186 + doc/sdk/http-headers/marshal-buffers.en.rst | 51 + doc/sdk/http-headers/mime-headers.en.rst | 437 + doc/sdk/http-headers/urls.en.rst | 132 + doc/sdk/http-hooks-and-transactions.en.rst | 144 + .../adding-hooks.en.rst | 151 + .../http-alternate-selection.en.rst | 189 + .../http-sessions.en.rst | 52 + .../http-transactions.en.rst | 203 + .../initiate-http-connection.en.rst | 25 + .../intercepting-http-transactions.en.rst | 30 + doc/sdk/http-transformation-plugin.en.rst | 165 + .../append-transform-plugin.en.rst | 144 + ...buffered-null-transformation-plugin.en.rst | 212 + .../sample-null-transformation-plugin.en.rst | 217 + doc/sdk/index.en.rst | 69 + doc/sdk/io-guide.en.rst | 192 + doc/sdk/io-guide/guide-to-cache-api.en.rst | 68 + .../io-guide/guide-to-cache-api/errors.en.rst | 31 + .../guide-to-cache-api/example.en.rst | 73 + .../how-to-do-a-cache-remove.en.rst | 31 + .../how-to-do-a-cache-write.en.rst | 34 + doc/sdk/io-guide/io-buffers.en.rst | 52 + doc/sdk/io-guide/net-vconnections.en.rst | 31 + doc/sdk/io-guide/transformations.en.rst | 179 + doc/sdk/io-guide/vios.en.rst | 58 + doc/sdk/misc-interface-guide.en.rst | 60 + .../memory-allocation.en.rst | 46 + 
.../thread-functions.en.rst | 34 + .../tsfopen-family.en.rst | 54 + doc/sdk/mutex-guide.en.rst | 401 + doc/sdk/new-protocol-plugins.en.rst | 372 + doc/sdk/plugin-configurations.en.rst | 115 + doc/sdk/plugin-management.en.rst | 28 + .../guide-to-the-logging-api.en.rst | 112 + ...afficserver-settings-and-statistics.en.rst | 55 + doc/sdk/preface.en.rst | 47 + doc/sdk/preface/how-to-use-this-book.en.rst | 130 + .../preface/typographical-conventions.en.rst | 36 + doc/sdk/remap-plugin.en.rst | 75 + .../remap-plugin/example-query-remap.en.rst | 150 + doc/sdk/sample-source-code.en.rst | 308 + doc/sdk/skeleton | 134 + doc/sdk/troubleshooting-tips.en.rst | 55 + .../debugging-memory-leaks.en.rst | 31 + .../unable-to-debug-tags.en.rst | 109 + .../unable-to-load-plugins.en.rst | 36 + .../using-a-debugger.en.rst | 39 + doc/static/images/admin/alarm.jpg | Bin 0 -> 95132 bytes doc/static/images/admin/ats-basic-traffic.png | Bin 0 -> 31464 bytes .../images/admin/ats-traffic-bridge.png | Bin 0 -> 22915 bytes doc/static/images/admin/cache_hit.jpg | Bin 0 -> 79909 bytes doc/static/images/admin/cache_miss.jpg | Bin 0 -> 108282 bytes doc/static/images/admin/cachehrc.jpg | Bin 0 -> 76254 bytes doc/static/images/admin/configur.jpg | Bin 0 -> 102958 bytes doc/static/images/admin/dash.jpg | Bin 0 -> 111902 bytes doc/static/images/admin/dash3.jpg | Bin 0 -> 105424 bytes doc/static/images/admin/firewall.jpg | Bin 0 -> 54553 bytes doc/static/images/admin/httprvs.jpg | Bin 0 -> 78185 bytes doc/static/images/admin/logcolat.jpg | Bin 0 -> 112020 bytes doc/static/images/admin/monitor.jpg | Bin 0 -> 95403 bytes .../images/admin/netscape_common_format.jpg | Bin 0 -> 29378 bytes .../admin/netscape_extended2_format.jpg | Bin 0 -> 42918 bytes .../images/admin/netscape_extended_format.jpg | Bin 0 -> 37438 bytes doc/static/images/admin/process.jpg | Bin 0 -> 136229 bytes doc/static/images/admin/records.jpg | Bin 0 -> 166227 bytes doc/static/images/admin/revproxy.jpg | Bin 0 -> 141814 bytes 
doc/static/images/admin/squid_format.jpg | Bin 0 -> 30949 bytes doc/static/images/admin/ssl_c.jpg | Bin 0 -> 71033 bytes doc/static/images/admin/ssl_os.jpg | Bin 0 -> 75656 bytes doc/static/images/admin/stat.jpg | Bin 0 -> 48740 bytes .../images/admin/summary-mixt.log_file.jpg | Bin 0 -> 32797 bytes doc/static/images/admin/wccp-shared-outer.png | Bin 0 -> 54957 bytes doc/static/images/companies/akamai.png | Bin 0 -> 7655 bytes doc/static/images/companies/betfair.png | Bin 0 -> 4329 bytes doc/static/images/companies/brainswear.png | Bin 0 -> 6452 bytes doc/static/images/companies/comcast.png | Bin 0 -> 5504 bytes doc/static/images/companies/devaus.png | Bin 0 -> 3545 bytes doc/static/images/companies/frisch.png | Bin 0 -> 7873 bytes doc/static/images/companies/godaddy.png | Bin 0 -> 9593 bytes doc/static/images/companies/guruve.png | Bin 0 -> 3124 bytes doc/static/images/companies/internode.png | Bin 0 -> 8323 bytes doc/static/images/companies/linkedin.png | Bin 0 -> 7665 bytes doc/static/images/companies/omniti.png | Bin 0 -> 5032 bytes doc/static/images/companies/powerhttp.png | Bin 0 -> 5340 bytes doc/static/images/companies/sic.png | Bin 0 -> 9999 bytes doc/static/images/companies/suomen.png | Bin 0 -> 7851 bytes doc/static/images/companies/taobao.png | Bin 0 -> 7951 bytes doc/static/images/companies/vng.png | Bin 0 -> 4258 bytes doc/static/images/companies/yahoo.png | Bin 0 -> 5014 bytes doc/static/images/docbook/caution.png | Bin 0 -> 1250 bytes doc/static/images/docbook/important.png | Bin 0 -> 722 bytes doc/static/images/docbook/note.png | Bin 0 -> 490 bytes doc/static/images/docbook/tip.png | Bin 0 -> 449 bytes doc/static/images/docbook/ts75.png | Bin 0 -> 9520 bytes doc/static/images/download_background.png | Bin 0 -> 290 bytes doc/static/images/download_ts.png | Bin 0 -> 4560 bytes doc/static/images/favicon.ico | Bin 0 -> 32988 bytes doc/static/images/grey_paper.jpg | Bin 0 -> 7290 bytes doc/static/images/press/trafficserver.svg | 164 + 
.../images/press/ts_logo_tm_1024x196.png | Bin 0 -> 21235 bytes .../images/press/ts_logo_tm_1960x375.png | Bin 0 -> 41128 bytes doc/static/images/press/ts_logo_tm_380x73.png | Bin 0 -> 7500 bytes doc/static/images/press/tsicon.png | Bin 0 -> 4328 bytes doc/static/images/press/tsicon_large.png | Bin 0 -> 8307 bytes doc/static/images/press/tsicon_small.png | Bin 0 -> 2132 bytes .../images/sdk/Protocol_state_diagram.jpg | Bin 0 -> 62342 bytes doc/static/images/sdk/Uses.jpg | Bin 0 -> 96105 bytes doc/static/images/sdk/blacklist75.jpg | Bin 0 -> 93834 bytes doc/static/images/sdk/event_sys80.jpg | Bin 0 -> 70729 bytes doc/static/images/sdk/evt_plugin120.jpg | Bin 0 -> 82354 bytes doc/static/images/sdk/http_header_struct.jpg | Bin 0 -> 41668 bytes doc/static/images/sdk/http_headers.jpg | Bin 0 -> 83292 bytes doc/static/images/sdk/http_state2.jpg | Bin 0 -> 147802 bytes doc/static/images/sdk/marshall_buffers.jpg | Bin 0 -> 90358 bytes doc/static/images/sdk/plugin_process.jpg | Bin 0 -> 34531 bytes doc/static/images/sdk/protocol_evt.jpg | Bin 0 -> 61809 bytes doc/static/images/sdk/protocol_sm_big.jpg | Bin 0 -> 59301 bytes doc/static/images/sdk/transact75.jpg | Bin 0 -> 49702 bytes doc/static/images/sdk/transact_hook75.jpg | Bin 0 -> 84733 bytes doc/static/images/sdk/txn_sm.jpg | Bin 0 -> 94051 bytes doc/static/images/sdk/vconn_buf_output.jpg | Bin 0 -> 44215 bytes doc/static/images/sdk/vconn_buffer.jpg | Bin 0 -> 64914 bytes doc/static/images/sdk/vconnection.jpg | Bin 0 -> 50918 bytes doc/static/images/sdk/vconnection1.jpg | Bin 0 -> 81340 bytes doc/static/images/sdk/vconnection2.jpg | Bin 0 -> 57055 bytes doc/static/images/trans_logo_148x132.png | Bin 0 -> 2356 bytes doc/static/images/trans_logo_150x30.gif | Bin 0 -> 1514 bytes doc/static/images/trans_logo_150x30.png | Bin 0 -> 1285 bytes doc/static/images/trans_logo_150x32.png | Bin 0 -> 2080 bytes doc/static/images/trans_logo_160x35.png | Bin 0 -> 2090 bytes doc/static/images/trans_logo_250x49.gif | Bin 0 -> 2287 bytes 
doc/static/images/trans_logo_250x49.png | Bin 0 -> 2072 bytes doc/static/images/trans_logo_350x69.gif | Bin 0 -> 3180 bytes doc/static/images/trans_logo_350x69.png | Bin 0 -> 2910 bytes doc/static/images/trans_logo_420x95.png | Bin 0 -> 4213 bytes doc/static/images/trans_logo_603x132.png | Bin 0 -> 5665 bytes doc/static/images/trans_logo_tm_380x69.png | Bin 0 -> 4192 bytes doc/static/images/ts75.png | Bin 0 -> 7340 bytes doc/static/images/warning.png | Bin 0 -> 394 bytes emacs-style | 31 + example/Makefile.am | 80 + example/Makefile.in | 1585 + example/add-header/add-header.c | 199 + example/add-header/readme.txt | 49 + example/app-template/app-template.cc | 238 + example/app-template/app-template.h | 48 + example/app-template/records.config.in | 73 + example/append-transform/append-transform.c | 380 + example/append-transform/readme.txt | 88 + example/basic-auth/basic-auth.c | 244 + example/basic-auth/readme.txt | 56 + example/blacklist-0/blacklist-0.c | 174 + example/blacklist-1/blacklist-1.c | 342 + example/blacklist-1/blacklist.txt | 2 + example/blacklist-1/readme.txt | 17 + example/bnull-transform/bnull-transform.c | 332 + example/cache-scan/cache-scan.cc | 495 + example/file-1/file-1.c | 68 + example/file-1/readme.txt | 35 + example/hello/hello.c | 43 + example/intercept/intercept.cc | 553 + example/lifecycle-plugin/lifecycle-plugin.c | 114 + example/lifecycle-plugin/readme.txt | 1 + example/null-transform/null-transform.c | 328 + example/null-transform/readme.txt | 112 + example/output-header/output-header.c | 170 + example/output-header/readme | 5 + example/prefetch/prefetch-plugin-eg1.c | 86 + example/prefetch/readme.txt | 26 + example/prefetch/test-hns-plugin.c | 248 + example/protocol/Protocol.c | 154 + example/protocol/Protocol.h | 47 + example/protocol/README.txt | 52 + example/protocol/TxnSM.c | 979 + example/protocol/TxnSM.h | 84 + example/protocol/test/ProtocolClientTest.java | 198 + example/protocol/test/ProtocolServerTest.java | 181 + 
example/protocol/test/file_gen.sh | 25 + example/query-remap/query-remap.c | 188 + example/redirect-1/readme.txt | 9 + example/redirect-1/redirect-1.c | 399 + example/remap/build.txt | 17 + example/remap/remap.cc | 364 + example/replace-header/replace-header.c | 110 + example/response-header-1/response-header-1.c | 303 + example/secure-link/readme.txt | 19 + example/secure-link/secure-link.c | 201 + example/server-transform/server-transform.c | 659 + example/session-1/session-1.c | 114 + example/thread-1/readme.txt | 20 + example/thread-1/thread-1.c | 82 + example/thread-pool/README.txt | 133 + example/thread-pool/TESTPLAN.txt | 74 + example/thread-pool/include/Makefile.am | 23 + example/thread-pool/include/gen.c | 40 + example/thread-pool/include/gen_inc.sh | 43 + example/thread-pool/psi.c | 1038 + .../test/SDKTest/SDKtest_server.config | 15 + example/thread-pool/test/SDKTest/psi_server.c | 195 + .../thread-pool/test/SynTest/Tests/Psi/1.cfg | 50 + .../thread-pool/test/SynTest/Tests/Psi/10.cfg | 51 + .../thread-pool/test/SynTest/Tests/Psi/11.cfg | 49 + .../thread-pool/test/SynTest/Tests/Psi/12.cfg | 45 + .../thread-pool/test/SynTest/Tests/Psi/13.cfg | 51 + .../thread-pool/test/SynTest/Tests/Psi/2.cfg | 50 + .../thread-pool/test/SynTest/Tests/Psi/3.cfg | 50 + .../thread-pool/test/SynTest/Tests/Psi/4.cfg | 50 + .../thread-pool/test/SynTest/Tests/Psi/5.cfg | 50 + .../thread-pool/test/SynTest/Tests/Psi/6.cfg | 50 + .../thread-pool/test/SynTest/Tests/Psi/7.cfg | 50 + .../thread-pool/test/SynTest/Tests/Psi/8.cfg | 50 + .../thread-pool/test/SynTest/Tests/Psi/9.cfg | 50 + .../SynTest/Tests/Psi/psi_files/tc10_file.txt | 3 + .../SynTest/Tests/Psi/psi_files/tc11_file.txt | 2 + .../SynTest/Tests/Psi/psi_files/tc12_file.txt | 3 + .../SynTest/Tests/Psi/psi_files/tc13_file.txt | 3 + .../SynTest/Tests/Psi/psi_files/tc1_file.txt | 2 + .../SynTest/Tests/Psi/psi_files/tc2_file.txt | 1 + .../SynTest/Tests/Psi/psi_files/tc3_file.txt | 2 + .../SynTest/Tests/Psi/psi_files/tc4_file.txt | 
5 + .../SynTest/Tests/Psi/psi_files/tc5_file.txt | 3 + .../SynTest/Tests/Psi/psi_files/tc6_file.txt | 1 + .../SynTest/Tests/Psi/psi_files/tc7_file.txt | 3 + .../SynTest/Tests/Psi/psi_files/tc8_file.txt | 3 + .../SynTest/Tests/Psi/psi_files/tc9_file.txt | 2 + example/thread-pool/test/SynTest/system.cfg | 44 + .../thread-pool/test/SynTest/tests_psi.cfg | 61 + example/thread-pool/thread.c | 190 + example/thread-pool/thread.h | 89 + example/version/version.c | 67 + iocore/Makefile.am | 19 + iocore/Makefile.in | 867 + iocore/aio/AIO.cc | 671 + iocore/aio/I_AIO.h | 163 + iocore/aio/Inline.cc | 30 + iocore/aio/Makefile.am | 59 + iocore/aio/Makefile.in | 1309 + iocore/aio/P_AIO.h | 173 + iocore/aio/sample.cfg | 17 + iocore/aio/test_AIO.cc | 536 + iocore/aio/test_AIO.sample | 2 + iocore/cache/Cache.cc | 3508 ++ iocore/cache/CacheDir.cc | 1592 + iocore/cache/CacheDisk.cc | 411 + iocore/cache/CacheHosting.cc | 1202 + iocore/cache/CacheHttp.cc | 355 + iocore/cache/CacheLink.cc | 166 + iocore/cache/CachePages.cc | 641 + iocore/cache/CachePagesInternal.cc | 340 + iocore/cache/CacheRead.cc | 1218 + iocore/cache/CacheTest.cc | 489 + iocore/cache/CacheVol.cc | 496 + iocore/cache/CacheWrite.cc | 1965 + iocore/cache/I_Cache.h | 238 + iocore/cache/I_CacheDefs.h | 133 + iocore/cache/I_Store.h | 201 + iocore/cache/Inline.cc | 32 + iocore/cache/Makefile.am | 69 + iocore/cache/Makefile.in | 933 + iocore/cache/Notes | 32 + iocore/cache/P_Cache.h | 47 + iocore/cache/P_CacheArray.h | 182 + iocore/cache/P_CacheDir.h | 393 + iocore/cache/P_CacheDisk.h | 135 + iocore/cache/P_CacheHosting.h | 191 + iocore/cache/P_CacheHttp.h | 99 + iocore/cache/P_CacheInternal.h | 1358 + iocore/cache/P_CacheTest.h | 132 + iocore/cache/P_CacheVol.h | 942 + iocore/cache/P_RamCache.h | 44 + iocore/cache/RamCacheCLFUS.cc | 681 + iocore/cache/RamCacheLRU.cc | 210 + iocore/cache/Store.cc | 1194 + iocore/cluster/ClusterAPI.cc | 580 + iocore/cluster/ClusterCache.cc | 3202 ++ iocore/cluster/ClusterConfig.cc | 462 + 
iocore/cluster/ClusterHandler.cc | 3102 ++ iocore/cluster/ClusterHandlerBase.cc | 1463 + iocore/cluster/ClusterHash.cc | 175 + iocore/cluster/ClusterLib.cc | 366 + iocore/cluster/ClusterLoadMonitor.cc | 318 + iocore/cluster/ClusterMachine.cc | 284 + iocore/cluster/ClusterProcessor.cc | 847 + iocore/cluster/ClusterRPC.cc | 379 + iocore/cluster/ClusterVConnection.cc | 653 + iocore/cluster/Inline.cc | 30 + iocore/cluster/Makefile.am | 61 + iocore/cluster/Makefile.in | 915 + iocore/cluster/P_Cluster.h | 142 + iocore/cluster/P_ClusterCache.h | 1172 + iocore/cluster/P_ClusterCacheInternal.h | 828 + iocore/cluster/P_ClusterHandler.h | 675 + iocore/cluster/P_ClusterInline.h | 402 + iocore/cluster/P_ClusterInternal.h | 521 + iocore/cluster/P_ClusterLib.h | 72 + iocore/cluster/P_ClusterLoadMonitor.h | 118 + iocore/cluster/P_ClusterMachine.h | 135 + iocore/cluster/P_TimeTrace.h | 83 + iocore/cluster/test_I_Cluster.cc | 147 + iocore/cluster/test_P_Cluster.cc | 147 + iocore/dns/DNS.cc | 1721 + iocore/dns/DNSConnection.cc | 196 + iocore/dns/I_DNS.h | 40 + iocore/dns/I_DNSProcessor.h | 220 + iocore/dns/I_SplitDNS.h | 43 + iocore/dns/I_SplitDNSProcessor.h | 59 + iocore/dns/Inline.cc | 32 + iocore/dns/Makefile.am | 51 + iocore/dns/Makefile.in | 891 + iocore/dns/P_DNS.h | 42 + iocore/dns/P_DNSConnection.h | 120 + iocore/dns/P_DNSProcessor.h | 347 + iocore/dns/P_SplitDNS.h | 48 + iocore/dns/P_SplitDNSProcessor.h | 249 + iocore/dns/SRV.h | 74 + iocore/dns/SplitDNS.cc | 597 + iocore/dns/test_I_DNS.cc | 138 + iocore/dns/test_P_DNS.cc | 83 + iocore/eventsystem/EventSystem.cc | 48 + iocore/eventsystem/IOBuffer.cc | 269 + iocore/eventsystem/I_Action.h | 213 + iocore/eventsystem/I_Continuation.h | 200 + iocore/eventsystem/I_EThread.h | 346 + iocore/eventsystem/I_Event.h | 285 + iocore/eventsystem/I_EventProcessor.h | 324 + iocore/eventsystem/I_EventSystem.h | 56 + iocore/eventsystem/I_IOBuffer.h | 1376 + iocore/eventsystem/I_Lock.h | 585 + iocore/eventsystem/I_PriorityEventQueue.h | 125 + 
iocore/eventsystem/I_Processor.h | 117 + iocore/eventsystem/I_ProtectedQueue.h | 60 + iocore/eventsystem/I_ProxyAllocator.h | 124 + iocore/eventsystem/I_SocketManager.h | 179 + iocore/eventsystem/I_Tasks.h | 40 + iocore/eventsystem/I_Thread.h | 163 + iocore/eventsystem/I_VConnection.h | 405 + iocore/eventsystem/I_VIO.h | 220 + iocore/eventsystem/Inline.cc | 31 + iocore/eventsystem/Lock.cc | 112 + iocore/eventsystem/Makefile.am | 113 + iocore/eventsystem/Makefile.in | 1412 + iocore/eventsystem/PQ-List.cc | 63 + iocore/eventsystem/P_EventSystem.h | 54 + iocore/eventsystem/P_Freer.h | 138 + iocore/eventsystem/P_IOBuffer.h | 1199 + iocore/eventsystem/P_ProtectedQueue.h | 96 + iocore/eventsystem/P_Thread.h | 69 + iocore/eventsystem/P_UnixEThread.h | 173 + iocore/eventsystem/P_UnixEvent.h | 58 + iocore/eventsystem/P_UnixEventProcessor.h | 155 + iocore/eventsystem/P_UnixSocketManager.h | 550 + iocore/eventsystem/P_VConnection.h | 138 + iocore/eventsystem/P_VIO.h | 139 + iocore/eventsystem/Processor.cc | 90 + iocore/eventsystem/ProtectedQueue.cc | 169 + iocore/eventsystem/ProxyAllocator.cc | 53 + iocore/eventsystem/SocketManager.cc | 66 + iocore/eventsystem/Tasks.cc | 40 + iocore/eventsystem/Thread.cc | 106 + iocore/eventsystem/UnixEThread.cc | 305 + iocore/eventsystem/UnixEvent.cc | 98 + iocore/eventsystem/UnixEventProcessor.cc | 183 + iocore/eventsystem/test_Buffer.cc | 150 + iocore/eventsystem/test_Event.cc | 171 + iocore/eventsystem/test_I_Buffer.cc | 25 + iocore/eventsystem/test_I_Event.cc | 25 + iocore/eventsystem/test_P_Buffer.cc | 25 + iocore/eventsystem/test_P_Event.cc | 25 + iocore/hostdb/HostDB.cc | 2487 ++ iocore/hostdb/I_HostDB.h | 50 + iocore/hostdb/I_HostDBProcessor.h | 497 + iocore/hostdb/Inline.cc | 32 + iocore/hostdb/Makefile.am | 47 + iocore/hostdb/Makefile.in | 885 + iocore/hostdb/MultiCache.cc | 1430 + iocore/hostdb/P_HostDB.h | 59 + iocore/hostdb/P_HostDBProcessor.h | 560 + iocore/hostdb/P_MultiCache.h | 674 + iocore/hostdb/include/Machine.h | 145 + 
iocore/hostdb/test_I_HostDB.cc | 148 + iocore/hostdb/test_P_HostDB.cc | 83 + iocore/net/Connection.cc | 339 + iocore/net/I_Net.h | 89 + iocore/net/I_NetProcessor.h | 294 + iocore/net/I_NetVConnection.h | 517 + iocore/net/I_SessionAccept.h | 50 + iocore/net/I_Socks.h | 83 + iocore/net/I_UDPConnection.h | 110 + iocore/net/I_UDPNet.h | 113 + iocore/net/I_UDPPacket.h | 111 + iocore/net/Inline.cc | 31 + iocore/net/Makefile.am | 118 + iocore/net/Makefile.in | 1353 + iocore/net/Net.cc | 131 + iocore/net/NetTest-http-server.c | 195 + iocore/net/NetVCTest.cc | 418 + iocore/net/NetVConnection.cc | 46 + iocore/net/P_CompletionUtil.h | 47 + iocore/net/P_Connection.h | 196 + iocore/net/P_InkBulkIO.h | 179 + iocore/net/P_LibBulkIO.h | 176 + iocore/net/P_Net.h | 129 + iocore/net/P_NetAccept.h | 127 + iocore/net/P_NetVCTest.h | 152 + iocore/net/P_NetVConnection.h | 83 + iocore/net/P_SSLCertLookup.h | 53 + iocore/net/P_SSLConfig.h | 122 + iocore/net/P_SSLNetAccept.h | 67 + iocore/net/P_SSLNetProcessor.h | 83 + iocore/net/P_SSLNetVConnection.h | 138 + iocore/net/P_SSLNextProtocolAccept.h | 65 + iocore/net/P_SSLNextProtocolSet.h | 68 + iocore/net/P_SSLUtils.h | 120 + iocore/net/P_Socks.h | 166 + iocore/net/P_UDPConnection.h | 167 + iocore/net/P_UDPIOEvent.h | 95 + iocore/net/P_UDPNet.h | 341 + iocore/net/P_UDPPacket.h | 256 + iocore/net/P_UnixCompletionUtil.h | 104 + iocore/net/P_UnixNet.h | 618 + iocore/net/P_UnixNetProcessor.h | 98 + iocore/net/P_UnixNetState.h | 61 + iocore/net/P_UnixNetVConnection.h | 393 + iocore/net/P_UnixPollDescriptor.h | 142 + iocore/net/P_UnixUDPConnection.h | 120 + iocore/net/SSLCertLookup.cc | 347 + iocore/net/SSLConfig.cc | 327 + iocore/net/SSLNetAccept.cc | 77 + iocore/net/SSLNetProcessor.cc | 119 + iocore/net/SSLNetVConnection.cc | 762 + iocore/net/SSLNextProtocolAccept.cc | 170 + iocore/net/SSLNextProtocolSet.cc | 172 + iocore/net/SSLUtils.cc | 1481 + iocore/net/SessionAccept.cc | 41 + iocore/net/Socks.cc | 732 + iocore/net/UDPIOEvent.cc | 25 + 
iocore/net/UnixConnection.cc | 385 + iocore/net/UnixNet.cc | 448 + iocore/net/UnixNetAccept.cc | 563 + iocore/net/UnixNetPages.cc | 249 + iocore/net/UnixNetProcessor.cc | 477 + iocore/net/UnixNetVConnection.cc | 1208 + iocore/net/UnixUDPConnection.cc | 158 + iocore/net/UnixUDPNet.cc | 890 + iocore/net/test_I_Net.cc | 179 + iocore/net/test_I_UDPNet.cc | 214 + iocore/net/test_P_Net.cc | 112 + iocore/net/test_P_UDPNet.cc | 214 + iocore/net/test_certlookup.cc | 225 + iocore/utils/I_Machine.h | 100 + iocore/utils/I_OneWayMultiTunnel.h | 154 + iocore/utils/I_OneWayTunnel.h | 223 + iocore/utils/Machine.cc | 193 + iocore/utils/Makefile.am | 33 + iocore/utils/Makefile.in | 872 + iocore/utils/OneWayMultiTunnel.cc | 256 + iocore/utils/OneWayTunnel.cc | 390 + iocore/utils/diags.i | 121 + lib/Makefile.am | 57 + lib/Makefile.in | 896 + lib/atscppapi/Makefile.am | 17 + lib/atscppapi/Makefile.in | 866 + lib/atscppapi/examples/Makefile.am | 38 + lib/atscppapi/examples/Makefile.in | 887 + .../async_http_fetch/AsyncHttpFetch.cc | 192 + .../examples/async_http_fetch/Makefile.am | 30 + .../examples/async_http_fetch/Makefile.in | 906 + .../examples/async_timer/AsyncTimer.cc | 86 + .../examples/async_timer/Makefile.am | 29 + .../examples/async_timer/Makefile.in | 906 + lib/atscppapi/examples/boom/Makefile.am | 28 + lib/atscppapi/examples/boom/Makefile.in | 905 + lib/atscppapi/examples/boom/README.txt | 72 + lib/atscppapi/examples/boom/boom.cc | 417 + .../examples/clientredirect/ClientRedirect.cc | 76 + .../examples/clientredirect/Makefile.am | 28 + .../examples/clientredirect/Makefile.in | 906 + .../examples/clientrequest/ClientRequest.cc | 127 + .../examples/clientrequest/Makefile.am | 30 + .../examples/clientrequest/Makefile.in | 906 + .../examples/customresponse/CustomResponse.cc | 82 + .../examples/customresponse/Makefile.am | 29 + .../examples/customresponse/Makefile.in | 906 + .../examples/globalhook/GlobalHookPlugin.cc | 41 + lib/atscppapi/examples/globalhook/Makefile.am | 29 + 
lib/atscppapi/examples/globalhook/Makefile.in | 906 + .../GzipTransformationPlugin.cc | 170 + .../examples/gzip_transformation/Makefile.am | 30 + .../examples/gzip_transformation/Makefile.in | 907 + .../examples/helloworld/HelloWorldPlugin.cc | 36 + lib/atscppapi/examples/helloworld/Makefile.am | 29 + lib/atscppapi/examples/helloworld/Makefile.in | 906 + lib/atscppapi/examples/intercept/Makefile.am | 30 + lib/atscppapi/examples/intercept/Makefile.in | 906 + lib/atscppapi/examples/intercept/intercept.cc | 73 + .../InternalTransactionHandling.cc | 65 + .../internal_transaction_handling/Makefile.am | 29 + .../internal_transaction_handling/Makefile.in | 908 + .../examples/logger_example/LoggerExample.cc | 132 + .../examples/logger_example/Makefile.am | 30 + .../examples/logger_example/Makefile.in | 906 + .../multiple_transaction_hooks/Makefile.am | 28 + .../multiple_transaction_hooks/Makefile.in | 908 + .../MultipleTransactionHookPlugins.cc | 100 + .../null_transformation_plugin/Makefile.am | 30 + .../null_transformation_plugin/Makefile.in | 907 + .../NullTransformationPlugin.cc | 87 + .../examples/post_buffer/Makefile.am | 29 + .../examples/post_buffer/Makefile.in | 906 + .../examples/post_buffer/PostBuffer.cc | 75 + .../examples/remap_plugin/Makefile.am | 30 + .../examples/remap_plugin/Makefile.in | 906 + .../examples/remap_plugin/RemapPlugin.cc | 87 + .../examples/serverresponse/Makefile.am | 29 + .../examples/serverresponse/Makefile.in | 906 + .../examples/serverresponse/ServerResponse.cc | 110 + .../examples/stat_example/Makefile.am | 30 + .../examples/stat_example/Makefile.in | 906 + .../examples/stat_example/StatExample.cc | 71 + .../examples/timeout_example/Makefile.am | 30 + .../examples/timeout_example/Makefile.in | 907 + .../timeout_example/TimeoutExamplePlugin.cc | 54 + .../examples/transactionhook/Makefile.am | 30 + .../examples/transactionhook/Makefile.in | 907 + .../transactionhook/TransactionHookPlugin.cc | 60 + lib/atscppapi/src/AsyncHttpFetch.cc | 186 
+ lib/atscppapi/src/AsyncTimer.cc | 115 + .../src/CaseInsensitiveStringComparator.cc | 70 + lib/atscppapi/src/ClientRequest.cc | 74 + lib/atscppapi/src/GlobalPlugin.cc | 79 + .../src/GzipDeflateTransformation.cc | 154 + .../src/GzipInflateTransformation.cc | 131 + lib/atscppapi/src/Headers.cc | 562 + lib/atscppapi/src/HttpMethod.cc | 37 + lib/atscppapi/src/HttpVersion.cc | 29 + lib/atscppapi/src/InterceptPlugin.cc | 335 + lib/atscppapi/src/Logger.cc | 215 + lib/atscppapi/src/Makefile.am | 85 + lib/atscppapi/src/Makefile.in | 1015 + lib/atscppapi/src/Plugin.cc | 31 + lib/atscppapi/src/RemapPlugin.cc | 66 + lib/atscppapi/src/Request.cc | 168 + lib/atscppapi/src/Response.cc | 114 + lib/atscppapi/src/Stat.cc | 96 + lib/atscppapi/src/Transaction.cc | 312 + lib/atscppapi/src/TransactionPlugin.cc | 83 + lib/atscppapi/src/TransformationPlugin.cc | 336 + lib/atscppapi/src/Url.cc | 207 + lib/atscppapi/src/include/atscppapi/Async.h | 221 + .../src/include/atscppapi/AsyncHttpFetch.h | 105 + .../src/include/atscppapi/AsyncTimer.h | 80 + .../CaseInsensitiveStringComparator.h | 51 + .../src/include/atscppapi/ClientRequest.h | 59 + .../src/include/atscppapi/GlobalPlugin.h | 90 + .../atscppapi/GzipDeflateTransformation.h | 91 + .../atscppapi/GzipInflateTransformation.h | 92 + lib/atscppapi/src/include/atscppapi/Headers.h | 600 + .../src/include/atscppapi/HttpMethod.h | 59 + .../src/include/atscppapi/HttpStatus.h | 104 + .../src/include/atscppapi/HttpVersion.h | 52 + .../src/include/atscppapi/InterceptPlugin.h | 101 + lib/atscppapi/src/include/atscppapi/Logger.h | 268 + lib/atscppapi/src/include/atscppapi/Mutex.h | 250 + lib/atscppapi/src/include/atscppapi/Plugin.h | 106 + .../src/include/atscppapi/PluginInit.h | 57 + .../src/include/atscppapi/RemapPlugin.h | 70 + lib/atscppapi/src/include/atscppapi/Request.h | 72 + .../src/include/atscppapi/Response.h | 71 + lib/atscppapi/src/include/atscppapi/Stat.h | 106 + .../src/include/atscppapi/Transaction.h | 348 + 
.../src/include/atscppapi/TransactionPlugin.h | 113 + .../include/atscppapi/TransformationPlugin.h | 130 + lib/atscppapi/src/include/atscppapi/Url.h | 149 + .../src/include/atscppapi/noncopyable.h | 64 + .../src/include/atscppapi/shared_ptr.h | 51 + lib/atscppapi/src/include/atscppapi/utils.h | 72 + lib/atscppapi/src/include/logging_internal.h | 44 + lib/atscppapi/src/include/utils_internal.h | 99 + lib/atscppapi/src/utils.cc | 77 + lib/atscppapi/src/utils_internal.cc | 237 + lib/luajit/.gitignore | 11 + lib/luajit/COPYRIGHT | 56 + lib/luajit/Makefile | 151 + lib/luajit/README | 16 + lib/luajit/doc/bluequad-print.css | 166 + lib/luajit/doc/bluequad.css | 325 + lib/luajit/doc/changes.html | 928 + lib/luajit/doc/contact.html | 102 + lib/luajit/doc/ext_c_api.html | 187 + lib/luajit/doc/ext_ffi.html | 330 + lib/luajit/doc/ext_ffi_api.html | 566 + lib/luajit/doc/ext_ffi_semantics.html | 1245 + lib/luajit/doc/ext_ffi_tutorial.html | 601 + lib/luajit/doc/ext_jit.html | 199 + lib/luajit/doc/extensions.html | 408 + lib/luajit/doc/faq.html | 184 + lib/luajit/doc/img/contact.png | Bin 0 -> 1340 bytes lib/luajit/doc/install.html | 631 + lib/luajit/doc/luajit.html | 228 + lib/luajit/doc/running.html | 306 + lib/luajit/doc/status.html | 125 + lib/luajit/dynasm/dasm_arm.h | 456 + lib/luajit/dynasm/dasm_arm.lua | 1122 + lib/luajit/dynasm/dasm_mips.h | 416 + lib/luajit/dynasm/dasm_mips.lua | 953 + lib/luajit/dynasm/dasm_ppc.h | 412 + lib/luajit/dynasm/dasm_ppc.lua | 1249 + lib/luajit/dynasm/dasm_proto.h | 83 + lib/luajit/dynasm/dasm_x64.lua | 12 + lib/luajit/dynasm/dasm_x86.h | 471 + lib/luajit/dynasm/dasm_x86.lua | 1937 + lib/luajit/dynasm/dynasm.lua | 1094 + lib/luajit/etc/luajit.1 | 88 + lib/luajit/etc/luajit.pc | 25 + lib/luajit/src/.gitignore | 7 + lib/luajit/src/Makefile | 686 + lib/luajit/src/Makefile.dep | 226 + lib/luajit/src/host/.gitignore | 3 + lib/luajit/src/host/README | 4 + lib/luajit/src/host/buildvm.c | 516 + lib/luajit/src/host/buildvm.h | 104 + 
lib/luajit/src/host/buildvm_asm.c | 313 + lib/luajit/src/host/buildvm_fold.c | 229 + lib/luajit/src/host/buildvm_lib.c | 398 + lib/luajit/src/host/buildvm_peobj.c | 368 + lib/luajit/src/host/genminilua.lua | 428 + lib/luajit/src/host/minilua.c | 7770 ++++ lib/luajit/src/jit/.gitignore | 1 + lib/luajit/src/jit/bc.lua | 191 + lib/luajit/src/jit/bcsave.lua | 659 + lib/luajit/src/jit/dis_arm.lua | 689 + lib/luajit/src/jit/dis_mips.lua | 428 + lib/luajit/src/jit/dis_mipsel.lua | 20 + lib/luajit/src/jit/dis_ppc.lua | 591 + lib/luajit/src/jit/dis_x64.lua | 20 + lib/luajit/src/jit/dis_x86.lua | 836 + lib/luajit/src/jit/dump.lua | 699 + lib/luajit/src/jit/v.lua | 167 + lib/luajit/src/lauxlib.h | 167 + lib/luajit/src/lib_aux.c | 356 + lib/luajit/src/lib_base.c | 683 + lib/luajit/src/lib_bit.c | 74 + lib/luajit/src/lib_debug.c | 405 + lib/luajit/src/lib_ffi.c | 850 + lib/luajit/src/lib_init.c | 55 + lib/luajit/src/lib_io.c | 539 + lib/luajit/src/lib_jit.c | 663 + lib/luajit/src/lib_math.c | 233 + lib/luajit/src/lib_os.c | 280 + lib/luajit/src/lib_package.c | 602 + lib/luajit/src/lib_string.c | 940 + lib/luajit/src/lib_table.c | 300 + lib/luajit/src/lj.supp | 26 + lib/luajit/src/lj_alloc.c | 1390 + lib/luajit/src/lj_alloc.h | 17 + lib/luajit/src/lj_api.c | 1200 + lib/luajit/src/lj_arch.h | 426 + lib/luajit/src/lj_asm.c | 1914 + lib/luajit/src/lj_asm.h | 17 + lib/luajit/src/lj_asm_arm.h | 2361 ++ lib/luajit/src/lj_asm_mips.h | 1977 + lib/luajit/src/lj_asm_ppc.h | 2167 ++ lib/luajit/src/lj_asm_x86.h | 2794 ++ lib/luajit/src/lj_bc.c | 14 + lib/luajit/src/lj_bc.h | 261 + lib/luajit/src/lj_bcdump.h | 66 + lib/luajit/src/lj_bcread.c | 476 + lib/luajit/src/lj_bcwrite.c | 396 + lib/luajit/src/lj_carith.c | 351 + lib/luajit/src/lj_carith.h | 27 + lib/luajit/src/lj_ccall.c | 900 + lib/luajit/src/lj_ccall.h | 171 + lib/luajit/src/lj_ccallback.c | 644 + lib/luajit/src/lj_ccallback.h | 25 + lib/luajit/src/lj_cconv.c | 751 + lib/luajit/src/lj_cconv.h | 70 + lib/luajit/src/lj_cdata.c | 285 + 
lib/luajit/src/lj_cdata.h | 75 + lib/luajit/src/lj_char.c | 43 + lib/luajit/src/lj_char.h | 42 + lib/luajit/src/lj_clib.c | 409 + lib/luajit/src/lj_clib.h | 29 + lib/luajit/src/lj_cparse.c | 1872 + lib/luajit/src/lj_cparse.h | 65 + lib/luajit/src/lj_crecord.c | 1653 + lib/luajit/src/lj_crecord.h | 31 + lib/luajit/src/lj_ctype.c | 634 + lib/luajit/src/lj_ctype.h | 461 + lib/luajit/src/lj_debug.c | 603 + lib/luajit/src/lj_debug.h | 61 + lib/luajit/src/lj_def.h | 349 + lib/luajit/src/lj_dispatch.c | 494 + lib/luajit/src/lj_dispatch.h | 131 + lib/luajit/src/lj_emit_arm.h | 356 + lib/luajit/src/lj_emit_mips.h | 211 + lib/luajit/src/lj_emit_ppc.h | 238 + lib/luajit/src/lj_emit_x86.h | 466 + lib/luajit/src/lj_err.c | 785 + lib/luajit/src/lj_err.h | 41 + lib/luajit/src/lj_errmsg.h | 193 + lib/luajit/src/lj_ff.h | 18 + lib/luajit/src/lj_ffrecord.c | 888 + lib/luajit/src/lj_ffrecord.h | 24 + lib/luajit/src/lj_frame.h | 183 + lib/luajit/src/lj_func.c | 185 + lib/luajit/src/lj_func.h | 24 + lib/luajit/src/lj_gc.c | 849 + lib/luajit/src/lj_gc.h | 134 + lib/luajit/src/lj_gdbjit.c | 793 + lib/luajit/src/lj_gdbjit.h | 22 + lib/luajit/src/lj_ir.c | 501 + lib/luajit/src/lj_ir.h | 551 + lib/luajit/src/lj_ircall.h | 271 + lib/luajit/src/lj_iropt.h | 161 + lib/luajit/src/lj_jit.h | 416 + lib/luajit/src/lj_lex.c | 481 + lib/luajit/src/lj_lex.h | 85 + lib/luajit/src/lj_lib.c | 258 + lib/luajit/src/lj_lib.h | 112 + lib/luajit/src/lj_load.c | 168 + lib/luajit/src/lj_mcode.c | 373 + lib/luajit/src/lj_mcode.h | 30 + lib/luajit/src/lj_meta.c | 466 + lib/luajit/src/lj_meta.h | 37 + lib/luajit/src/lj_obj.c | 35 + lib/luajit/src/lj_obj.h | 856 + lib/luajit/src/lj_opt_dce.c | 77 + lib/luajit/src/lj_opt_fold.c | 2297 ++ lib/luajit/src/lj_opt_loop.c | 436 + lib/luajit/src/lj_opt_mem.c | 917 + lib/luajit/src/lj_opt_narrow.c | 648 + lib/luajit/src/lj_opt_sink.c | 245 + lib/luajit/src/lj_opt_split.c | 731 + lib/luajit/src/lj_parse.c | 2752 ++ lib/luajit/src/lj_parse.h | 18 + lib/luajit/src/lj_record.c 
| 2248 ++ lib/luajit/src/lj_record.h | 44 + lib/luajit/src/lj_snap.c | 865 + lib/luajit/src/lj_snap.h | 34 + lib/luajit/src/lj_state.c | 287 + lib/luajit/src/lj_state.h | 35 + lib/luajit/src/lj_str.c | 339 + lib/luajit/src/lj_str.h | 50 + lib/luajit/src/lj_strscan.c | 497 + lib/luajit/src/lj_strscan.h | 39 + lib/luajit/src/lj_tab.c | 631 + lib/luajit/src/lj_tab.h | 70 + lib/luajit/src/lj_target.h | 162 + lib/luajit/src/lj_target_arm.h | 274 + lib/luajit/src/lj_target_mips.h | 257 + lib/luajit/src/lj_target_ppc.h | 280 + lib/luajit/src/lj_target_x86.h | 342 + lib/luajit/src/lj_trace.c | 815 + lib/luajit/src/lj_trace.h | 53 + lib/luajit/src/lj_traceerr.h | 61 + lib/luajit/src/lj_udata.c | 34 + lib/luajit/src/lj_udata.h | 14 + lib/luajit/src/lj_vm.h | 116 + lib/luajit/src/lj_vmevent.c | 57 + lib/luajit/src/lj_vmevent.h | 59 + lib/luajit/src/lj_vmmath.c | 140 + lib/luajit/src/ljamalg.c | 93 + lib/luajit/src/lua.h | 393 + lib/luajit/src/lua.hpp | 9 + lib/luajit/src/luaconf.h | 156 + lib/luajit/src/luajit.c | 571 + lib/luajit/src/luajit.h | 70 + lib/luajit/src/lualib.h | 43 + lib/luajit/src/msvcbuild.bat | 113 + lib/luajit/src/ps4build.bat | 103 + lib/luajit/src/vm_arm.dasc | 4487 +++ lib/luajit/src/vm_mips.dasc | 4241 +++ lib/luajit/src/vm_ppc.dasc | 5137 +++ lib/luajit/src/vm_ppcspe.dasc | 3691 ++ lib/luajit/src/vm_x86.dasc | 6374 ++++ lib/luajit/src/xedkbuild.bat | 92 + lib/perl/MANIFEST | 9 + lib/perl/Makefile.PL | 29 + lib/perl/Makefile.am | 37 + lib/perl/Makefile.in | 707 + lib/perl/README | 51 + lib/perl/examples/forw_proxy_conf.pl | 41 + lib/perl/lib/Apache/TS.pm.in | 90 + lib/perl/lib/Apache/TS/AdminClient.pm | 766 + lib/perl/lib/Apache/TS/Config.pm | 37 + lib/perl/lib/Apache/TS/Config/Records.pm | 303 + lib/perl/t/Apache-TS-AdminClient.t | 35 + lib/records/I_RecAlarms.h | 50 + lib/records/I_RecCore.h | 305 + lib/records/I_RecDefs.h | 204 + lib/records/I_RecEvents.h | 42 + lib/records/I_RecHttp.h | 420 + lib/records/I_RecLocal.h | 37 + lib/records/I_RecMutex.h | 
47 + lib/records/I_RecProcess.h | 164 + lib/records/I_RecSignals.h | 48 + lib/records/Makefile.am | 98 + lib/records/Makefile.in | 1348 + lib/records/P_RecCore.cc | 1042 + lib/records/P_RecCore.h | 107 + lib/records/P_RecDefs.h | 179 + lib/records/P_RecFile.h | 57 + lib/records/P_RecLocal.h | 29 + lib/records/P_RecMessage.h | 53 + lib/records/P_RecProcess.h | 44 + lib/records/P_RecTree.h | 93 + lib/records/P_RecUtils.h | 81 + lib/records/RecConfigParse.cc | 259 + lib/records/RecCore.cc | 1218 + lib/records/RecDebug.cc | 81 + lib/records/RecFile.cc | 306 + lib/records/RecHttp.cc | 661 + lib/records/RecLocal.cc | 185 + lib/records/RecMessage.cc | 386 + lib/records/RecMutex.cc | 70 + lib/records/RecProcess.cc | 889 + lib/records/RecTree.cc | 277 + lib/records/RecUtils.cc | 443 + lib/records/test_I_RecLocal.cc | 279 + lib/records/test_I_RecProcess.cc | 25 + lib/records/test_P_RecProcess.cc | 25 + lib/records/test_RecProcess.i | 670 + lib/records/test_RecTree.cc | 78 + lib/records/test_RecordsConfig.cc | 60 + lib/records/test_RecordsConfig.h | 28 + lib/records/test_records.config | 20 + lib/ts/Allocator.h | 224 + lib/ts/Arena.cc | 160 + lib/ts/Arena.h | 182 + lib/ts/Bitops.cc | 63 + lib/ts/Bitops.h | 275 + lib/ts/Compatability.h | 73 + lib/ts/CompileParseRules.cc | 146 + lib/ts/Diags.cc | 547 + lib/ts/Diags.h | 327 + lib/ts/DynArray.h | 191 + lib/ts/EventNotify.cc | 189 + lib/ts/EventNotify.h | 59 + lib/ts/HostLookup.cc | 1327 + lib/ts/HostLookup.h | 130 + lib/ts/INK_MD5.h | 149 + lib/ts/I_Layout.h | 107 + lib/ts/I_Version.h | 100 + lib/ts/InkErrno.h | 72 + lib/ts/InkPool.h | 96 + lib/ts/IntrusiveDList.h | 311 + lib/ts/IntrusivePtrTest.cc | 75 + lib/ts/IpMap.cc | 1405 + lib/ts/IpMap.h | 560 + lib/ts/IpMapConf.cc | 177 + lib/ts/IpMapConf.h | 31 + lib/ts/IpMapTest.cc | 295 + lib/ts/Layout.cc | 226 + lib/ts/List.h | 502 + lib/ts/Makefile.am | 215 + lib/ts/Makefile.in | 1723 + lib/ts/Map.h | 941 + lib/ts/MatcherUtils.cc | 607 + lib/ts/MatcherUtils.h | 134 + 
lib/ts/MimeTable.cc | 211 + lib/ts/MimeTable.h | 75 + lib/ts/ParseRules.cc | 309 + lib/ts/ParseRules.h | 848 + lib/ts/Ptr.h | 430 + lib/ts/RawHashTable.cc | 32 + lib/ts/RawHashTable.h | 400 + lib/ts/Regex.cc | 147 + lib/ts/Regex.h | 67 + lib/ts/Regression.cc | 214 + lib/ts/Regression.h | 111 + lib/ts/SimpleTokenizer.h | 291 + lib/ts/TestBox.h | 63 + lib/ts/TestHttpHeader.cc | 214 + lib/ts/TextBuffer.cc | 214 + lib/ts/TextBuffer.h | 61 + lib/ts/Tokenizer.cc | 364 + lib/ts/Tokenizer.h | 163 + lib/ts/Trie.h | 246 + lib/ts/TsBuffer.h | 411 + lib/ts/Vec.cc | 203 + lib/ts/Vec.h | 833 + lib/ts/Version.cc | 108 + lib/ts/apidefs.h.in | 1160 + lib/ts/defalloc.h | 34 + lib/ts/fastlz.c | 551 + lib/ts/fastlz.h | 100 + lib/ts/ink_aiocb.h | 63 + lib/ts/ink_align.h | 99 + lib/ts/ink_apidefs.h | 78 + lib/ts/ink_args.cc | 262 + lib/ts/ink_args.h | 79 + lib/ts/ink_assert.cc | 38 + lib/ts/ink_assert.h | 74 + lib/ts/ink_atomic.h | 177 + lib/ts/ink_auth_api.cc | 82 + lib/ts/ink_auth_api.h | 228 + lib/ts/ink_autoconf.h.in | 476 + lib/ts/ink_base64.cc | 167 + lib/ts/ink_base64.h | 49 + lib/ts/ink_cap.cc | 180 + lib/ts/ink_cap.h | 101 + lib/ts/ink_code.cc | 134 + lib/ts/ink_code.h | 44 + lib/ts/ink_config.h.in | 121 + lib/ts/ink_defs.cc | 138 + lib/ts/ink_defs.h | 165 + lib/ts/ink_error.cc | 259 + lib/ts/ink_error.h | 63 + lib/ts/ink_exception.h | 71 + lib/ts/ink_file.cc | 372 + lib/ts/ink_file.h | 106 + lib/ts/ink_hash_table.cc | 459 + lib/ts/ink_hash_table.h | 149 + lib/ts/ink_hrtime.cc | 222 + lib/ts/ink_hrtime.h | 336 + lib/ts/ink_inet.cc | 533 + lib/ts/ink_inet.h | 1284 + lib/ts/ink_inout.h | 76 + lib/ts/ink_llqueue.h | 63 + lib/ts/ink_lockfile.h | 102 + lib/ts/ink_memory.cc | 241 + lib/ts/ink_memory.h | 259 + lib/ts/ink_mutex.cc | 32 + lib/ts/ink_mutex.h | 130 + lib/ts/ink_platform.h | 216 + lib/ts/ink_queue.cc | 505 + lib/ts/ink_queue.h | 221 + lib/ts/ink_queue_ext.cc | 686 + lib/ts/ink_queue_ext.h | 143 + lib/ts/ink_queue_utils.cc | 80 + lib/ts/ink_rand.cc | 109 + lib/ts/ink_rand.h 
| 88 + lib/ts/ink_res_init.cc | 646 + lib/ts/ink_res_mkquery.cc | 537 + lib/ts/ink_resolver.h | 308 + lib/ts/ink_resource.cc | 37 + lib/ts/ink_resource.h | 53 + lib/ts/ink_rwlock.cc | 167 + lib/ts/ink_rwlock.h | 55 + lib/ts/ink_sock.cc | 254 + lib/ts/ink_sock.h | 66 + lib/ts/ink_sprintf.cc | 152 + lib/ts/ink_sprintf.h | 48 + lib/ts/ink_stack_trace.cc | 76 + lib/ts/ink_stack_trace.h | 40 + lib/ts/ink_string++.cc | 182 + lib/ts/ink_string++.h | 335 + lib/ts/ink_string.cc | 272 + lib/ts/ink_string.h | 457 + lib/ts/ink_sys_control.cc | 65 + lib/ts/ink_sys_control.h | 40 + lib/ts/ink_syslog.cc | 84 + lib/ts/ink_syslog.h | 37 + lib/ts/ink_thread.cc | 39 + lib/ts/ink_thread.h | 329 + lib/ts/ink_time.cc | 184 + lib/ts/ink_time.h | 95 + lib/ts/libts.h | 104 + lib/ts/llqueue.cc | 273 + lib/ts/load_http_hdr.cc | 436 + lib/ts/lockfile.cc | 266 + lib/ts/mkdfa.c | 661 + lib/ts/test_List.cc | 59 + lib/ts/test_Map.cc | 122 + lib/ts/test_Vec.cc | 129 + lib/ts/test_arena.cc | 126 + lib/ts/test_atomic.cc | 217 + lib/ts/test_freelist.cc | 87 + lib/ts/test_memchr.cc | 171 + lib/ts/test_strings.cc | 379 + lib/tsconfig/BisonHeaderToC++.sed | 8 + lib/tsconfig/Errata.cc | 297 + lib/tsconfig/Errata.h | 945 + lib/tsconfig/IntrusivePtr.h | 609 + lib/tsconfig/Makefile.am | 59 + lib/tsconfig/Makefile.in | 1034 + lib/tsconfig/NumericType.h | 182 + lib/tsconfig/TsBuilder.cc | 214 + lib/tsconfig/TsBuilder.h | 101 + lib/tsconfig/TsConfigGrammar.y | 121 + lib/tsconfig/TsConfigLexer.h | 61 + lib/tsconfig/TsConfigParseEvents.h | 67 + lib/tsconfig/TsConfigSyntax.l | 124 + lib/tsconfig/TsConfigTypes.h | 66 + lib/tsconfig/TsErrataUtil.cc | 174 + lib/tsconfig/TsErrataUtil.h | 169 + lib/tsconfig/TsValue.cc | 364 + lib/tsconfig/TsValue.h | 740 + lib/tsconfig/test-1.tsconfig | 18 + lib/tsconfig/test-tsconfig.cc | 49 + lib/wccp/Makefile.am | 42 + lib/wccp/Makefile.in | 882 + lib/wccp/Wccp.h | 512 + lib/wccp/WccpConfig.cc | 796 + lib/wccp/WccpEndPoint.cc | 1238 + lib/wccp/WccpLocal.h | 3555 ++ 
lib/wccp/WccpMeta.h | 203 + lib/wccp/WccpMsg.cc | 1783 + lib/wccp/WccpStatic.cc | 189 + lib/wccp/WccpUtil.h | 223 + lib/wccp/wccp-test-cache.cc | 196 + lib/wccp/wccp-test-router.cc | 157 + mgmt/AddConfigFilesHere.cc | 89 + mgmt/Alarms.cc | 576 + mgmt/Alarms.h | 146 + mgmt/BaseManager.cc | 169 + mgmt/BaseManager.h | 144 + mgmt/FileManager.cc | 808 + mgmt/FileManager.h | 152 + mgmt/LocalManager.cc | 1156 + mgmt/LocalManager.h | 158 + mgmt/Main.cc | 1239 + mgmt/Main.h | 53 + mgmt/Makefile.am | 117 + mgmt/Makefile.in | 1203 + mgmt/MgmtDefs.h | 88 + mgmt/MultiFile.cc | 247 + mgmt/MultiFile.h | 77 + mgmt/ProcessManager.cc | 343 + mgmt/ProcessManager.h | 109 + mgmt/ProxyConfig.cc | 365 + mgmt/ProxyConfig.h | 143 + mgmt/RecordsConfig.cc | 2141 ++ mgmt/RecordsConfig.h | 56 + mgmt/Rollback.cc | 1031 + mgmt/Rollback.h | 224 + mgmt/api/APITestCliRemote.cc | 2441 ++ mgmt/api/CfgContextDefs.h | 74 + mgmt/api/CfgContextImpl.cc | 2592 ++ mgmt/api/CfgContextImpl.h | 560 + mgmt/api/CfgContextManager.cc | 621 + mgmt/api/CfgContextManager.h | 77 + mgmt/api/CfgContextUtils.cc | 2854 ++ mgmt/api/CfgContextUtils.h | 202 + mgmt/api/CoreAPI.cc | 908 + mgmt/api/CoreAPI.h | 105 + mgmt/api/CoreAPIRemote.cc | 851 + mgmt/api/CoreAPIShared.cc | 468 + mgmt/api/CoreAPIShared.h | 88 + mgmt/api/EventCallback.cc | 346 + mgmt/api/EventCallback.h | 85 + mgmt/api/EventControlMain.cc | 502 + mgmt/api/EventControlMain.h | 63 + mgmt/api/EventRegistration.cc | 166 + mgmt/api/EventRegistration.h | 49 + mgmt/api/GenericParser.cc | 1060 + mgmt/api/GenericParser.h | 304 + mgmt/api/INKMgmtAPI.cc | 2483 ++ mgmt/api/INKMgmtAPIStub.cc | 799 + mgmt/api/Makefile.am | 93 + mgmt/api/Makefile.in | 1167 + mgmt/api/NetworkUtilsDefs.h | 87 + mgmt/api/NetworkUtilsLocal.cc | 775 + mgmt/api/NetworkUtilsLocal.h | 81 + mgmt/api/NetworkUtilsRemote.cc | 1559 + mgmt/api/NetworkUtilsRemote.h | 109 + mgmt/api/TSControlMain.cc | 1002 + mgmt/api/TSControlMain.h | 76 + mgmt/api/include/Makefile.am | 22 + mgmt/api/include/Makefile.in | 
811 + mgmt/api/include/mgmtapi.h | 1389 + mgmt/cluster/ClusterCom.cc | 2468 ++ mgmt/cluster/ClusterCom.h | 188 + mgmt/cluster/Makefile.am | 39 + mgmt/cluster/Makefile.in | 873 + mgmt/cluster/VMap.cc | 892 + mgmt/cluster/VMap.h | 125 + mgmt/stats/Makefile.am | 43 + mgmt/stats/Makefile.in | 879 + mgmt/stats/StatProcessor.cc | 358 + mgmt/stats/StatProcessor.h | 94 + mgmt/stats/StatType.cc | 1199 + mgmt/stats/StatType.h | 231 + mgmt/stats/StatXML.cc | 82 + mgmt/stats/StatXML.h | 47 + mgmt/stats/spec | 236 + mgmt/utils/ExpandingArray.cc | 83 + mgmt/utils/ExpandingArray.h | 62 + mgmt/utils/Makefile.am | 53 + mgmt/utils/Makefile.in | 942 + mgmt/utils/MgmtHashTable.h | 187 + mgmt/utils/MgmtSocket.h | 199 + mgmt/utils/MgmtUtils.cc | 622 + mgmt/utils/MgmtUtils.h | 70 + mgmt/utils/WebMgmtUtils.cc | 1377 + mgmt/utils/WebMgmtUtils.h | 124 + mgmt/web2/Makefile.am | 54 + mgmt/web2/Makefile.in | 898 + mgmt/web2/WebCompatibility.cc | 227 + mgmt/web2/WebCompatibility.h | 89 + mgmt/web2/WebGlobals.h | 144 + mgmt/web2/WebHttp.cc | 631 + mgmt/web2/WebHttp.h | 45 + mgmt/web2/WebHttpContext.cc | 108 + mgmt/web2/WebHttpContext.h | 69 + mgmt/web2/WebHttpMessage.cc | 576 + mgmt/web2/WebHttpMessage.h | 287 + mgmt/web2/WebHttpSession.cc | 218 + mgmt/web2/WebHttpSession.h | 46 + mgmt/web2/WebIntrMain.cc | 617 + mgmt/web2/WebIntrMain.h | 42 + mgmt/web2/WebOverview.cc | 846 + mgmt/web2/WebOverview.h | 151 + mgmt/web2/WebUtils.cc | 115 + mgmt/web2/WebUtils.h | 53 + plugins/Makefile.am | 33 + plugins/Makefile.in | 869 + plugins/cacheurl/Makefile.am | 21 + plugins/cacheurl/Makefile.in | 934 + plugins/cacheurl/cacheurl.cc | 445 + plugins/cacheurl/cacheurl.config.example | 39 + plugins/conf_remap/Makefile.am | 21 + plugins/conf_remap/Makefile.in | 935 + plugins/conf_remap/conf_remap.cc | 349 + plugins/experimental/Makefile.am | 40 + plugins/experimental/Makefile.in | 874 + plugins/experimental/authproxy/Makefile.am | 22 + plugins/experimental/authproxy/Makefile.in | 953 + 
plugins/experimental/authproxy/authproxy.cc | 887 + plugins/experimental/authproxy/utils.cc | 216 + plugins/experimental/authproxy/utils.h | 111 + .../experimental/background_fetch/Makefile.am | 21 + .../experimental/background_fetch/Makefile.in | 935 + .../background_fetch/background_fetch.cc | 630 + plugins/experimental/balancer/Makefile.am | 22 + plugins/experimental/balancer/Makefile.in | 954 + plugins/experimental/balancer/balancer.cc | 133 + plugins/experimental/balancer/balancer.h | 46 + plugins/experimental/balancer/hash.cc | 207 + plugins/experimental/balancer/roundrobin.cc | 72 + .../experimental/buffer_upload/Makefile.am | 21 + .../experimental/buffer_upload/Makefile.in | 935 + plugins/experimental/buffer_upload/README | 77 + .../buffer_upload/buffer_upload.cc | 1225 + .../experimental/channel_stats/Makefile.am | 21 + .../experimental/channel_stats/Makefile.in | 935 + .../experimental/channel_stats/Makefile.tsxs | 28 + plugins/experimental/channel_stats/README | 105 + .../channel_stats/channel_stats.cc | 825 + .../experimental/channel_stats/debug_macros.h | 73 + .../experimental/collapsed_connection/LICENSE | 203 + .../collapsed_connection/Makefile.am | 25 + .../collapsed_connection/Makefile.in | 942 + .../collapsed_connection/MurmurHash3.cc | 234 + .../collapsed_connection/MurmurHash3.h | 20 + .../experimental/collapsed_connection/NOTICE | 9 + .../P_collapsed_connection.h | 123 + .../experimental/collapsed_connection/README | 71 + .../collapsed_connection.cc | 1157 + .../collapsed_connection/default.config | 5 + .../collapsed_connection/state.dot | 40 + .../experimental/custom_redirect/Makefile.am | 22 + .../experimental/custom_redirect/Makefile.in | 935 + plugins/experimental/custom_redirect/README | 41 + .../custom_redirect/custom_redirect.cc | 173 + plugins/experimental/escalate/Makefile.am | 22 + plugins/experimental/escalate/Makefile.in | 934 + plugins/experimental/escalate/escalate.cc | 220 + plugins/experimental/esi/Makefile.am | 99 + 
plugins/experimental/esi/Makefile.in | 1557 + plugins/experimental/esi/README | 5 + plugins/experimental/esi/README.combo | 106 + plugins/experimental/esi/combo_handler.cc | 989 + plugins/experimental/esi/esi.cc | 1811 + .../esi/fetcher/FetchedDataProcessor.h | 41 + .../esi/fetcher/HttpDataFetcher.h | 74 + .../esi/fetcher/HttpDataFetcherImpl.cc | 357 + .../esi/fetcher/HttpDataFetcherImpl.h | 148 + plugins/experimental/esi/handlers.cfg | 27 + plugins/experimental/esi/lib/Attribute.h | 44 + plugins/experimental/esi/lib/ComponentBase.h | 60 + plugins/experimental/esi/lib/DocNode.cc | 162 + plugins/experimental/esi/lib/DocNode.h | 114 + plugins/experimental/esi/lib/EsiGzip.cc | 151 + plugins/experimental/esi/lib/EsiGzip.h | 60 + plugins/experimental/esi/lib/EsiParser.cc | 656 + plugins/experimental/esi/lib/EsiParser.h | 152 + plugins/experimental/esi/lib/EsiProcessor.cc | 853 + plugins/experimental/esi/lib/EsiProcessor.h | 168 + plugins/experimental/esi/lib/Expression.cc | 256 + plugins/experimental/esi/lib/Expression.h | 108 + plugins/experimental/esi/lib/FailureInfo.cc | 109 + plugins/experimental/esi/lib/FailureInfo.h | 113 + .../experimental/esi/lib/HandlerManager.cc | 92 + plugins/experimental/esi/lib/HandlerManager.h | 75 + plugins/experimental/esi/lib/HttpHeader.h | 45 + .../esi/lib/IncludeHandlerFactory.h | 54 + .../esi/lib/SpecialIncludeHandler.h | 73 + plugins/experimental/esi/lib/Stats.cc | 69 + plugins/experimental/esi/lib/Stats.h | 65 + plugins/experimental/esi/lib/StringHash.h | 47 + plugins/experimental/esi/lib/Utils.cc | 195 + plugins/experimental/esi/lib/Utils.h | 118 + plugins/experimental/esi/lib/Variables.cc | 446 + plugins/experimental/esi/lib/Variables.h | 161 + plugins/experimental/esi/lib/gzip.cc | 197 + plugins/experimental/esi/lib/gzip.h | 69 + plugins/experimental/esi/serverIntercept.cc | 357 + plugins/experimental/esi/serverIntercept.h | 38 + plugins/experimental/esi/test/HandlerMap.cc | 26 + plugins/experimental/esi/test/HandlerMap.h | 37 + 
.../esi/test/StubIncludeHandler.cc | 73 + .../esi/test/StubIncludeHandler.h | 66 + .../esi/test/TestHandlerManager.cc | 42 + .../esi/test/TestHttpDataFetcher.h | 79 + plugins/experimental/esi/test/docnode_test.cc | 249 + plugins/experimental/esi/test/parser_test.cc | 1591 + plugins/experimental/esi/test/print_funcs.cc | 47 + plugins/experimental/esi/test/print_funcs.h | 31 + .../experimental/esi/test/processor_test.cc | 1134 + plugins/experimental/esi/test/sampleProb.cc | 228 + plugins/experimental/esi/test/utils_test.cc | 119 + plugins/experimental/esi/test/vars_test.cc | 405 + plugins/experimental/geoip_acl/Makefile.am | 26 + plugins/experimental/geoip_acl/Makefile.in | 940 + plugins/experimental/geoip_acl/README | 85 + plugins/experimental/geoip_acl/acl.cc | 248 + plugins/experimental/geoip_acl/acl.h | 130 + plugins/experimental/geoip_acl/examples | 40 + plugins/experimental/geoip_acl/geoip_acl.cc | 127 + plugins/experimental/geoip_acl/lulu.h | 52 + plugins/experimental/healthchecks/Makefile.am | 25 + plugins/experimental/healthchecks/Makefile.in | 939 + plugins/experimental/healthchecks/README | 21 + .../experimental/healthchecks/healthchecks.c | 585 + plugins/experimental/hipes/Makefile.am | 22 + plugins/experimental/hipes/Makefile.in | 934 + plugins/experimental/hipes/README | 25 + plugins/experimental/hipes/gen_escape.c | 49 + plugins/experimental/hipes/hipes.cc | 563 + plugins/experimental/memcached_remap/AUTHORS | 7 + .../experimental/memcached_remap/Changelog | 2 + plugins/experimental/memcached_remap/NOTICE | 5 + plugins/experimental/memcached_remap/README | 122 + .../memcached_remap/memcached_remap.cc | 242 + .../experimental/memcached_remap/sample.py | 34 + plugins/experimental/metalink/Makefile.am | 21 + plugins/experimental/metalink/Makefile.in | 934 + plugins/experimental/metalink/README | 78 + plugins/experimental/metalink/metalink.cc | 916 + .../metalink/test/chunkedEncoding | 99 + .../metalink/test/chunkedEncodingDisconnect | 97 + 
.../metalink/test/clientDisconnect | 89 + .../experimental/metalink/test/contentLength | 100 + .../metalink/test/contentLengthDisconnect | 94 + .../test/finalChunkedEncodingDisconnect | 112 + plugins/experimental/metalink/test/headers | 75 + plugins/experimental/metalink/test/http09 | 85 + plugins/experimental/metalink/test/location | 124 + plugins/experimental/metalink/test/longer | 94 + .../experimental/metalink/test/notModified | 77 + .../metalink/test/pipeliningDisconnect | 93 + .../test/shortChunkedEncodingDisconnect | 96 + .../metalink/test/shortClientDisconnect | 85 + .../test/shortContentLengthDisconnect | 93 + plugins/experimental/metalink/test/zero | 90 + plugins/experimental/mysql_remap/AUTHORS | 7 + plugins/experimental/mysql_remap/README | 74 + plugins/experimental/mysql_remap/default.h | 25 + .../experimental/mysql_remap/lib/dictionary.c | 428 + .../experimental/mysql_remap/lib/dictionary.h | 197 + .../experimental/mysql_remap/lib/iniparser.c | 669 + .../experimental/mysql_remap/lib/iniparser.h | 303 + .../experimental/mysql_remap/mysql_remap.cc | 249 + plugins/experimental/mysql_remap/sample.ini | 24 + .../mysql_remap/schema/import.sql | 134 + .../experimental/regex_revalidate/Makefile.am | 21 + .../experimental/regex_revalidate/Makefile.in | 935 + .../regex_revalidate/regex_revalidate.c | 594 + plugins/experimental/remap_stats/Makefile.am | 25 + plugins/experimental/remap_stats/Makefile.in | 939 + .../experimental/remap_stats/remap_stats.c | 305 + plugins/experimental/s3_auth/Makefile.am | 21 + plugins/experimental/s3_auth/Makefile.in | 934 + plugins/experimental/s3_auth/s3_auth.cc | 528 + plugins/experimental/spdy/Makefile.am | 56 + plugins/experimental/spdy/TODO | 20 + plugins/experimental/spdy/http.cc | 307 + plugins/experimental/spdy/http.h | 89 + plugins/experimental/spdy/io.cc | 87 + plugins/experimental/spdy/io.h | 164 + plugins/experimental/spdy/lib/base/atomic.h | 57 + plugins/experimental/spdy/lib/base/inet.h | 72 + 
plugins/experimental/spdy/lib/base/logging.cc | 48 + plugins/experimental/spdy/lib/base/logging.h | 78 + plugins/experimental/spdy/lib/spdy/message.cc | 535 + plugins/experimental/spdy/lib/spdy/spdy.h | 291 + plugins/experimental/spdy/lib/spdy/zstream.cc | 123 + plugins/experimental/spdy/lib/spdy/zstream.h | 119 + plugins/experimental/spdy/protocol.cc | 188 + plugins/experimental/spdy/protocol.h | 47 + plugins/experimental/spdy/spdy.cc | 395 + plugins/experimental/spdy/stream.cc | 371 + plugins/experimental/spdy/strings.cc | 100 + plugins/experimental/spdy/tests/stubs.cc | 39 + .../experimental/spdy/tests/zstream_test.cc | 227 + .../stale_while_revalidate/Makefile.am | 21 + .../stale_while_revalidate/Makefile.in | 936 + .../stale_while_revalidate/Makefile.tsxs | 27 + .../stale_while_revalidate/README | 26 + .../stale_while_revalidate.c | 744 + .../stale_while_revalidate/test_server.js | 42 + plugins/experimental/ts_lua/Makefile.am | 50 + plugins/experimental/ts_lua/Makefile.in | 1141 + plugins/experimental/ts_lua/README.md | 5 + .../ts_lua/business/mediaslice.lua | 155 + .../experimental/ts_lua/business/sethost.lua | 41 + .../ts_lua/example/test_cache_lookup.lua | 44 + .../ts_lua/example/test_client_socket.lua | 40 + .../ts_lua/example/test_closure.lua | 23 + .../ts_lua/example/test_creq_method.lua | 40 + .../ts_lua/example/test_creq_uri.lua | 40 + .../ts_lua/example/test_creq_uri_args.lua | 36 + .../ts_lua/example/test_global_hdr.lua | 37 + .../ts_lua/example/test_global_hook.lua | 88 + .../experimental/ts_lua/example/test_hdr.lua | 37 + .../ts_lua/example/test_intercept.lua | 42 + .../experimental/ts_lua/example/test_post.lua | 48 + .../ts_lua/example/test_ret_403.lua | 38 + .../ts_lua/example/test_server_intercept.lua | 41 + .../ts_lua/example/test_transform.lua | 49 + .../ts_lua/example/test_txn_hook.lua | 100 + plugins/experimental/ts_lua/ts_lua.c | 467 + .../ts_lua/ts_lua_cached_response.c | 223 + .../ts_lua/ts_lua_cached_response.h | 27 + 
.../ts_lua/ts_lua_client_request.c | 784 + .../ts_lua/ts_lua_client_request.h | 27 + .../ts_lua/ts_lua_client_response.c | 395 + .../ts_lua/ts_lua_client_response.h | 27 + plugins/experimental/ts_lua/ts_lua_common.h | 220 + plugins/experimental/ts_lua/ts_lua_context.c | 93 + plugins/experimental/ts_lua/ts_lua_context.h | 28 + plugins/experimental/ts_lua/ts_lua_crypto.c | 177 + plugins/experimental/ts_lua/ts_lua_crypto.h | 27 + plugins/experimental/ts_lua/ts_lua_hook.c | 281 + plugins/experimental/ts_lua/ts_lua_hook.h | 27 + plugins/experimental/ts_lua/ts_lua_http.c | 283 + plugins/experimental/ts_lua/ts_lua_http.h | 27 + .../experimental/ts_lua/ts_lua_http_cntl.c | 101 + .../experimental/ts_lua/ts_lua_http_cntl.h | 27 + .../experimental/ts_lua/ts_lua_http_config.c | 304 + .../experimental/ts_lua/ts_lua_http_config.h | 27 + .../ts_lua/ts_lua_http_intercept.c | 335 + .../ts_lua/ts_lua_http_intercept.h | 27 + plugins/experimental/ts_lua/ts_lua_log.c | 116 + plugins/experimental/ts_lua/ts_lua_log.h | 27 + plugins/experimental/ts_lua/ts_lua_mgmt.c | 106 + plugins/experimental/ts_lua/ts_lua_mgmt.h | 27 + plugins/experimental/ts_lua/ts_lua_misc.c | 230 + plugins/experimental/ts_lua/ts_lua_misc.h | 27 + plugins/experimental/ts_lua/ts_lua_package.c | 297 + plugins/experimental/ts_lua/ts_lua_package.h | 27 + plugins/experimental/ts_lua/ts_lua_remap.c | 59 + plugins/experimental/ts_lua/ts_lua_remap.h | 27 + .../ts_lua/ts_lua_server_request.c | 404 + .../ts_lua/ts_lua_server_request.h | 27 + .../ts_lua/ts_lua_server_response.c | 319 + .../ts_lua/ts_lua_server_response.h | 27 + plugins/experimental/ts_lua/ts_lua_string.c | 32 + plugins/experimental/ts_lua/ts_lua_string.h | 30 + .../experimental/ts_lua/ts_lua_transform.c | 172 + .../experimental/ts_lua/ts_lua_transform.h | 27 + plugins/experimental/ts_lua/ts_lua_util.c | 715 + plugins/experimental/ts_lua/ts_lua_util.h | 52 + plugins/experimental/url_sig/Makefile.am | 21 + plugins/experimental/url_sig/Makefile.in | 934 + 
plugins/experimental/url_sig/Makefile.tsxs | 26 + plugins/experimental/url_sig/README | 183 + plugins/experimental/url_sig/genkeys.pl | 29 + plugins/experimental/url_sig/sign.pl | 101 + plugins/experimental/url_sig/url_sig.c | 476 + plugins/experimental/url_sig/url_sig.h | 52 + plugins/experimental/xdebug/Makefile.am | 22 + plugins/experimental/xdebug/Makefile.in | 934 + plugins/experimental/xdebug/xdebug.cc | 216 + plugins/gzip/Makefile.am | 21 + plugins/gzip/Makefile.in | 936 + plugins/gzip/README | 72 + plugins/gzip/configuration.cc | 259 + plugins/gzip/configuration.h | 84 + plugins/gzip/debug_macros.h | 59 + plugins/gzip/gzip.cc | 808 + plugins/gzip/misc.cc | 197 + plugins/gzip/misc.h | 81 + plugins/gzip/sample.gzip.config | 57 + plugins/header_rewrite/Examples/Force-close | 21 + plugins/header_rewrite/Examples/Regression | 32 + plugins/header_rewrite/Examples/YCS-EC | 36 + plugins/header_rewrite/Makefile.am | 42 + plugins/header_rewrite/Makefile.in | 976 + plugins/header_rewrite/README | 27 + plugins/header_rewrite/TODO | 29 + plugins/header_rewrite/condition.cc | 81 + plugins/header_rewrite/condition.h | 116 + plugins/header_rewrite/conditions.cc | 488 + plugins/header_rewrite/conditions.h | 352 + plugins/header_rewrite/expander.cc | 117 + plugins/header_rewrite/expander.h | 43 + plugins/header_rewrite/factory.cc | 123 + plugins/header_rewrite/factory.h | 34 + plugins/header_rewrite/header_rewrite.cc | 446 + plugins/header_rewrite/lulu.cc | 63 + plugins/header_rewrite/lulu.h | 70 + plugins/header_rewrite/matcher.cc | 26 + plugins/header_rewrite/matcher.h | 171 + plugins/header_rewrite/operator.cc | 61 + plugins/header_rewrite/operator.h | 97 + plugins/header_rewrite/operators.cc | 545 + plugins/header_rewrite/operators.h | 283 + plugins/header_rewrite/parser.cc | 121 + plugins/header_rewrite/parser.h | 66 + plugins/header_rewrite/regex_helper.cc | 69 + plugins/header_rewrite/regex_helper.h | 62 + plugins/header_rewrite/resources.cc | 128 + 
plugins/header_rewrite/resources.h | 89 + plugins/header_rewrite/ruleset.cc | 92 + plugins/header_rewrite/ruleset.h | 98 + plugins/header_rewrite/statement.cc | 100 + plugins/header_rewrite/statement.h | 107 + plugins/header_rewrite/value.h | 101 + plugins/libloader/Makefile.am | 21 + plugins/libloader/Makefile.in | 934 + plugins/libloader/libloader.c | 85 + plugins/regex_remap/Makefile.am | 21 + plugins/regex_remap/Makefile.in | 935 + plugins/regex_remap/README | 130 + plugins/regex_remap/regex_remap.cc | 1053 + plugins/stats_over_http/Makefile.am | 21 + plugins/stats_over_http/Makefile.in | 935 + plugins/stats_over_http/README | 30 + plugins/stats_over_http/stats_over_http.c | 273 + plugins/tcpinfo/Makefile.am | 22 + plugins/tcpinfo/Makefile.in | 934 + plugins/tcpinfo/tcpinfo.cc | 431 + proxy/AbstractBuffer.cc | 302 + proxy/AbstractBuffer.h | 184 + proxy/CacheControl.cc | 502 + proxy/CacheControl.h | 156 + proxy/ClassH.txt | 97 + proxy/CompletionUtil.h | 47 + proxy/ConfigParse.h | 41 + proxy/ControlBase.cc | 661 + proxy/ControlBase.h | 111 + proxy/ControlMatcher.cc | 1029 + proxy/ControlMatcher.h | 301 + proxy/CoreUtils.cc | 1055 + proxy/CoreUtils.h | 234 + proxy/DebugStreamLevels.txt | 4 + proxy/DynamicStats.h | 103 + proxy/EventName.cc | 115 + proxy/EventName.h | 38 + proxy/FetchSM.cc | 645 + proxy/FetchSM.h | 174 + proxy/HttpTransStats.h | 26 + proxy/ICP.cc | 2457 ++ proxy/ICP.h | 1308 + proxy/ICPConfig.cc | 1511 + proxy/ICPProcessor.cc | 59 + proxy/ICPProcessor.h | 80 + proxy/ICPStats.cc | 125 + proxy/ICPevents.h | 43 + proxy/ICPlog.h | 72 + proxy/IPAllow.cc | 324 + proxy/IPAllow.h | 150 + proxy/InkAPI-ensure-funcs-present.pl | 49 + proxy/InkAPI.cc | 8593 +++++ proxy/InkAPIInternal.h | 339 + proxy/InkAPITest.cc | 7667 ++++ proxy/InkAPITestTool.cc | 1013 + proxy/InkIOCoreAPI.cc | 755 + proxy/InkPool_r.h | 71 + proxy/Main.cc | 1738 + proxy/Main.h | 83 + proxy/Makefile.am | 363 + proxy/Makefile.in | 1842 + proxy/ParentSelection.cc | 1277 + 
proxy/ParentSelection.h | 239 + proxy/Plugin.cc | 296 + proxy/Plugin.h | 90 + proxy/PluginVC.cc | 1312 + proxy/PluginVC.h | 270 + proxy/Prefetch.cc | 2277 ++ proxy/Prefetch.h | 440 + proxy/ProtoSM.h | 150 + proxy/ProtocolProbeSessionAccept.cc | 140 + proxy/ProtocolProbeSessionAccept.h | 72 + proxy/README-stats.otl | 615 + proxy/RegressionSM.cc | 249 + proxy/RegressionSM.h | 80 + proxy/ReverseProxy.cc | 207 + proxy/ReverseProxy.h | 72 + proxy/Show.h | 147 + proxy/SocksProxy.cc | 579 + proxy/StatPages.cc | 279 + proxy/StatPages.h | 137 + proxy/StatSystem.cc | 786 + proxy/StatSystem.h | 636 + proxy/StufferUdpReceiver.cc | 335 + proxy/TestClock.cc | 46 + proxy/TestClusterHash.cc | 120 + proxy/TestDNS.cc | 258 + proxy/TestPreProc.cc | 188 + proxy/TestPreProc.h | 51 + proxy/TestProxy.cc | 408 + proxy/TestRegex.cc | 30 + proxy/TestSimpleProxy.cc | 160 + proxy/TimeTrace.h | 85 + proxy/Transform.cc | 1031 + proxy/Transform.h | 117 + proxy/TransformInternal.h | 166 + proxy/UDPAPIClientTest.cc | 116 + proxy/UDPAPIClientTest.h | 28 + proxy/UDPAPITest.cc | 131 + proxy/UDPAPITest.h | 28 + proxy/UnixCompletionUtil.h | 104 + proxy/Update.cc | 2658 ++ proxy/Update.h | 527 + proxy/UserNameCacheTest.h | 25 + proxy/api/ts/InkAPIPrivateIOCore.h | 238 + proxy/api/ts/Makefile.am | 27 + proxy/api/ts/Makefile.in | 816 + proxy/api/ts/TsException.h | 61 + proxy/api/ts/experimental.h | 727 + proxy/api/ts/remap.h | 137 + proxy/api/ts/ts.h | 2344 ++ proxy/config/Makefile.am | 55 + proxy/config/Makefile.in | 989 + proxy/config/body_factory/Makefile.am | 20 + proxy/config/body_factory/Makefile.in | 868 + .../body_factory/default/.body_factory_info | 16 + proxy/config/body_factory/default/Makefile.am | 49 + proxy/config/body_factory/default/Makefile.in | 772 + proxy/config/body_factory/default/README | 17 + .../config/body_factory/default/access#denied | 14 + .../default/access#proxy_auth_required | 14 + .../body_factory/default/access#redirect_url | 15 + 
.../body_factory/default/access#ssl_forbidden | 15 + .../body_factory/default/cache#not_in_cache | 16 + .../body_factory/default/cache#read_error | 14 + .../default/congestion#retryAfter | 15 + .../body_factory/default/connect#dns_failed | 17 + .../default/connect#failed_connect | 14 + .../body_factory/default/connect#hangup | 15 + proxy/config/body_factory/default/default | 14 + .../body_factory/default/interception#no_host | 17 + .../default/redirect#moved_permanently | 14 + .../default/redirect#moved_temporarily | 14 + .../default/request#cycle_detected | 16 + .../default/request#invalid_content_length | 15 + .../default/request#no_content_length | 15 + .../body_factory/default/request#no_host | 17 + .../default/request#scheme_unsupported | 15 + .../body_factory/default/request#syntax_error | 14 + .../default/response#bad_response | 14 + .../body_factory/default/response#bad_version | 15 + .../body_factory/default/timeout#activity | 14 + .../body_factory/default/timeout#inactivity | 14 + .../default/transcoding#unsupported | 16 + .../default/urlrouting#no_mapping | 15 + proxy/config/cache.config.default | 53 + proxy/config/cluster.config.default | 26 + proxy/config/congestion.config.default | 56 + proxy/config/hosting.config.default | 27 + proxy/config/icp.config.default | 39 + proxy/config/ip_allow.config.default | 23 + proxy/config/log_hosts.config.default | 11 + proxy/config/logs_xml.config.default | 391 + proxy/config/parent.config.default | 52 + proxy/config/plugin.config.default | 9 + proxy/config/prefetch.config.default | 58 + proxy/config/public_key.der | Bin 0 -> 436 bytes proxy/config/records.config.default.in | 197 + proxy/config/remap.config.default | 192 + proxy/config/socks.config.default | 49 + proxy/config/splitdns.config.default | 58 + proxy/config/ssl_multicert.config.default | 55 + proxy/config/stats.config.dtd | 14 + proxy/config/stats.config.xml.default | 1951 + proxy/config/storage.config.default.in | 49 + 
proxy/config/throttle_error.html.example | 32 + proxy/config/update.config.default | 43 + proxy/config/vaddrs.config.default | 22 + proxy/config/volume.config.default | 30 + proxy/congest/Congestion.cc | 737 + proxy/congest/Congestion.h | 470 + proxy/congest/CongestionDB.cc | 638 + proxy/congest/CongestionDB.h | 115 + proxy/congest/CongestionStats.cc | 52 + proxy/congest/CongestionStats.h | 51 + proxy/congest/CongestionTest.cc | 553 + proxy/congest/FeatureSpec.txt | 428 + proxy/congest/MT_hashtable.h | 405 + proxy/congest/Makefile.am | 45 + proxy/congest/Makefile.in | 881 + proxy/example_alarm_bin.sh | 71 + proxy/example_prep.sh | 32 + proxy/hdrs/HTTP.cc | 2129 ++ proxy/hdrs/HTTP.h | 1525 + proxy/hdrs/HdrHeap.cc | 1293 + proxy/hdrs/HdrHeap.h | 494 + proxy/hdrs/HdrTSOnly.cc | 199 + proxy/hdrs/HdrTest.cc | 2278 ++ proxy/hdrs/HdrTest.h | 83 + proxy/hdrs/HdrToken.cc | 767 + proxy/hdrs/HdrToken.h | 391 + proxy/hdrs/HdrUtils.cc | 181 + proxy/hdrs/HdrUtils.h | 183 + proxy/hdrs/HttpCompat.cc | 871 + proxy/hdrs/HttpCompat.h | 96 + proxy/hdrs/MIME.cc | 3796 ++ proxy/hdrs/MIME.h | 1636 + proxy/hdrs/Makefile.am | 65 + proxy/hdrs/Makefile.in | 914 + proxy/hdrs/URL.cc | 1752 + proxy/hdrs/URL.h | 736 + proxy/hdrs/load_http_hdr.cc | 341 + proxy/hdrs/test_header.cc | 1044 + proxy/hdrs/test_urlhash.cc | 90 + proxy/http/HttpBodyFactory.cc | 1019 + proxy/http/HttpBodyFactory.h | 244 + proxy/http/HttpCacheSM.cc | 324 + proxy/http/HttpCacheSM.h | 176 + proxy/http/HttpClientSession.cc | 636 + proxy/http/HttpClientSession.h | 176 + proxy/http/HttpConfig.cc | 1905 + proxy/http/HttpConfig.h | 971 + proxy/http/HttpConnectionCount.cc | 27 + proxy/http/HttpConnectionCount.h | 96 + proxy/http/HttpDebugNames.cc | 486 + proxy/http/HttpDebugNames.h | 41 + proxy/http/HttpPages.cc | 480 + proxy/http/HttpPages.h | 91 + proxy/http/HttpProxyAPIEnums.h | 47 + proxy/http/HttpProxyServerMain.cc | 321 + proxy/http/HttpProxyServerMain.h | 37 + proxy/http/HttpSM.cc | 7619 ++++ proxy/http/HttpSM.h | 644 + 
proxy/http/HttpServerSession.cc | 188 + proxy/http/HttpServerSession.h | 184 + proxy/http/HttpSessionAccept.cc | 105 + proxy/http/HttpSessionAccept.h | 210 + proxy/http/HttpSessionManager.cc | 335 + proxy/http/HttpSessionManager.h | 105 + proxy/http/HttpTransact.cc | 8810 +++++ proxy/http/HttpTransact.h | 1425 + proxy/http/HttpTransactCache.cc | 1570 + proxy/http/HttpTransactCache.h | 143 + proxy/http/HttpTransactHeaders.cc | 1127 + proxy/http/HttpTransactHeaders.h | 140 + proxy/http/HttpTunnel.cc | 1631 + proxy/http/HttpTunnel.h | 578 + proxy/http/HttpUpdateSM.cc | 232 + proxy/http/HttpUpdateSM.h | 84 + proxy/http/HttpUpdateTester.cc | 126 + proxy/http/Makefile.am | 85 + proxy/http/Makefile.in | 1053 + proxy/http/README.via | 85 + proxy/http/RegressionHttpTransact.cc | 103 + proxy/http/TestHttpTransact.cc | 112 + proxy/http/TestUrl.cc | 118 + proxy/http/remap/AclFiltering.cc | 192 + proxy/http/remap/AclFiltering.h | 99 + proxy/http/remap/Makefile.am | 51 + proxy/http/remap/Makefile.in | 898 + proxy/http/remap/RemapConfig.cc | 1262 + proxy/http/remap/RemapConfig.h | 79 + proxy/http/remap/RemapPluginInfo.cc | 98 + proxy/http/remap/RemapPluginInfo.h | 84 + proxy/http/remap/RemapPlugins.cc | 199 + proxy/http/remap/RemapPlugins.h | 75 + proxy/http/remap/RemapProcessor.cc | 334 + proxy/http/remap/RemapProcessor.h | 72 + proxy/http/remap/UrlMapping.cc | 237 + proxy/http/remap/UrlMapping.h | 190 + proxy/http/remap/UrlMappingPathIndex.cc | 92 + proxy/http/remap/UrlMappingPathIndex.h | 98 + proxy/http/remap/UrlRewrite.cc | 984 + proxy/http/remap/UrlRewrite.h | 193 + proxy/http/stats.memo | 247 + proxy/http/test_http_client.pl | 347 + proxy/http/test_proxy.pl | 326 + proxy/http/test_socket_close.cc | 645 + proxy/http/testheaders.cc | 132 + proxy/ink_icon.ico | Bin 0 -> 3774 bytes proxy/issues.txt | 252 + proxy/logcat.cc | 346 + proxy/logging/Log.cc | 1498 + proxy/logging/Log.h | 486 + proxy/logging/LogAccess.cc | 1427 + proxy/logging/LogAccess.h | 368 + 
proxy/logging/LogAccessHttp.cc | 1291 + proxy/logging/LogAccessHttp.h | 181 + proxy/logging/LogAccessICP.cc | 293 + proxy/logging/LogAccessICP.h | 95 + proxy/logging/LogAccessTest.cc | 396 + proxy/logging/LogAccessTest.h | 106 + proxy/logging/LogBuffer.cc | 820 + proxy/logging/LogBuffer.h | 324 + proxy/logging/LogBufferSink.h | 53 + proxy/logging/LogCollationAccept.cc | 111 + proxy/logging/LogCollationAccept.h | 47 + proxy/logging/LogCollationBase.h | 51 + proxy/logging/LogCollationClientSM.cc | 738 + proxy/logging/LogCollationClientSM.h | 125 + proxy/logging/LogCollationHostSM.cc | 527 + proxy/logging/LogCollationHostSM.h | 117 + proxy/logging/LogConfig.cc | 2279 ++ proxy/logging/LogConfig.h | 275 + proxy/logging/LogField.cc | 636 + proxy/logging/LogField.h | 268 + proxy/logging/LogFieldAliasMap.cc | 84 + proxy/logging/LogFieldAliasMap.h | 225 + proxy/logging/LogFile.cc | 896 + proxy/logging/LogFile.h | 207 + proxy/logging/LogFilter.cc | 821 + proxy/logging/LogFilter.h | 434 + proxy/logging/LogFormat.cc | 853 + proxy/logging/LogFormat.h | 161 + proxy/logging/LogHost.cc | 479 + proxy/logging/LogHost.h | 133 + proxy/logging/LogLimits.h | 42 + proxy/logging/LogObject.cc | 1445 + proxy/logging/LogObject.h | 406 + proxy/logging/LogPredefined.cc | 112 + proxy/logging/LogPredefined.h | 71 + proxy/logging/LogSock.cc | 743 + proxy/logging/LogSock.h | 139 + proxy/logging/LogStandalone.cc | 256 + proxy/logging/LogUtils.cc | 572 + proxy/logging/LogUtils.h | 59 + proxy/logging/Makefile.am | 85 + proxy/logging/Makefile.in | 954 + proxy/logstats.cc | 2584 ++ proxy/regression.pl | 166 + proxy/sac.cc | 130 + proxy/shared/DiagsConfig.cc | 401 + proxy/shared/DiagsConfig.h | 49 + proxy/shared/Error.cc | 109 + proxy/shared/Error.h | 173 + proxy/shared/InkXml.cc | 485 + proxy/shared/InkXml.h | 236 + proxy/shared/Makefile.am | 61 + proxy/shared/Makefile.in | 943 + proxy/shared/UglyLogStubs.cc | 205 + proxy/shared/signals.cc | 414 + proxy/shared/signals.h | 53 + proxy/spdy/Makefile.am | 
48 + proxy/spdy/Makefile.in | 889 + proxy/spdy/SpdyCallbacks.cc | 543 + proxy/spdy/SpdyCallbacks.h | 269 + proxy/spdy/SpdyClientSession.cc | 456 + proxy/spdy/SpdyClientSession.h | 142 + proxy/spdy/SpdyCommon.cc | 184 + proxy/spdy/SpdyCommon.h | 109 + proxy/spdy/SpdyDefs.h | 39 + proxy/spdy/SpdySessionAccept.cc | 71 + proxy/spdy/SpdySessionAccept.h | 50 + proxy/test_xml_parser.cc | 55 + proxy/tests/logstats.blog | Bin 0 -> 14976 bytes proxy/tests/logstats.json | 243 + proxy/tests/logstats.summary | 164 + proxy/tests/test_logstats_json | 28 + proxy/tests/test_logstats_summary | 29 + rc/Makefile.am | 28 + rc/Makefile.in | 778 + rc/solaris.txt | 12 + rc/start_traffic_shell | 55 + rc/trafficserver.conf.in | 41 + rc/trafficserver.in | 465 + rc/trafficserver.service.in | 30 + rc/trafficserver.xml.in | 101 + tools/Makefile.am | 50 + tools/Makefile.in | 1062 + tools/apichecker.pl | 525 + tools/http_load/README | 7 + tools/http_load/http_load.c | 3015 ++ tools/http_load/merge_stats.pl | 73 + tools/http_load/port.h | 134 + tools/http_load/timers.c | 313 + tools/http_load/timers.h | 107 + tools/jtest/README | 78 + tools/jtest/README.zh.md | 409 + tools/jtest/jtest.cc | 4073 ++ tools/lighttpd_mod_generator/Makefile | 35 + tools/lighttpd_mod_generator/README | 25 + .../ats_lighttpd_benchmark | 21 + .../lighttpd_mod_generator/conf/lighttpd.conf | 36 + .../conf/lighttpd_1.conf | 32 + .../conf/lighttpd_10.conf | 33 + .../conf/lighttpd_11.conf | 33 + .../conf/lighttpd_12.conf | 33 + .../conf/lighttpd_13.conf | 33 + .../conf/lighttpd_14.conf | 33 + .../conf/lighttpd_15.conf | 33 + .../conf/lighttpd_16.conf | 33 + .../conf/lighttpd_17.conf | 33 + .../conf/lighttpd_18.conf | 33 + .../conf/lighttpd_19.conf | 33 + .../conf/lighttpd_2.conf | 33 + .../conf/lighttpd_20.conf | 33 + .../conf/lighttpd_21.conf | 33 + .../conf/lighttpd_22.conf | 33 + .../conf/lighttpd_23.conf | 33 + .../conf/lighttpd_24.conf | 33 + .../conf/lighttpd_3.conf | 33 + .../conf/lighttpd_4.conf | 33 + 
.../conf/lighttpd_5.conf | 32 + .../conf/lighttpd_6.conf | 33 + .../conf/lighttpd_7.conf | 32 + .../conf/lighttpd_8.conf | 33 + .../conf/lighttpd_9.conf | 32 + tools/lighttpd_mod_generator/mod_generator.c | 357 + tools/reports/commits_since.sh | 38 + tools/traffic_shell.pl | 900 + tools/tspush | 73 + tools/tsxs.in | 228 + 2418 files changed, 771426 insertions(+) create mode 100644 .clang-format create mode 100644 .gitmodules create mode 100644 .indent.pro create mode 100644 .vimrc create mode 100644 CHANGES create mode 100644 CRUFT.txt create mode 100644 INSTALL create mode 100644 LAYOUT create mode 100644 LICENSE create mode 100644 Makefile.am create mode 100644 Makefile.in create mode 100644 NOTICE create mode 100644 README create mode 100644 README-EC2 create mode 100644 REVIEWERS create mode 100644 STATUS create mode 100644 Vagrantfile create mode 100644 aclocal.m4 create mode 100755 build/aux/ar-lib create mode 100755 build/aux/compile create mode 100755 build/aux/config.guess create mode 100755 build/aux/config.sub create mode 100755 build/aux/depcomp create mode 100755 build/aux/install-sh create mode 100644 build/aux/ltmain.sh create mode 100755 build/aux/missing create mode 100755 build/aux/test-driver create mode 100755 build/aux/ylwrap create mode 100644 build/ax_boost_base.m4 create mode 100644 build/ax_check_openssl.m4 create mode 100644 build/ax_compare_version.m4 create mode 100644 build/ax_compiler_vendor.m4 create mode 100644 build/ax_cxx_compile_stdcxx_11.m4 create mode 100644 build/ax_lib_curl.m4 create mode 100644 build/ax_lib_readline.m4 create mode 100644 build/ax_path_generic.m4 create mode 100644 build/ax_perl_modules.m4 create mode 100644 build/ax_with_curses.m4 create mode 100644 build/common.m4 create mode 100644 build/crypto.m4 create mode 100644 build/jemalloc.m4 create mode 100644 build/libtool.m4 create mode 100644 build/ltoptions.m4 create mode 100644 build/ltsugar.m4 create mode 100644 build/ltversion.m4 create mode 100644 
build/lt~obsolete.m4 create mode 100644 build/lzma.m4 create mode 100644 build/network.m4 create mode 100644 build/pcre.m4 create mode 100644 build/pkg.m4 create mode 100644 build/plugins.mk create mode 100644 build/tcl.m4 create mode 100644 build/tcmalloc.m4 create mode 100644 build/xml.m4 create mode 100644 build/zlib.m4 create mode 100644 cmd/Makefile.am create mode 100644 cmd/Makefile.in create mode 100644 cmd/traffic_cop/Makefile.am create mode 100644 cmd/traffic_cop/Makefile.in create mode 100644 cmd/traffic_cop/traffic_cop.cc create mode 100644 cmd/traffic_line/Makefile.am create mode 100644 cmd/traffic_line/Makefile.in create mode 100644 cmd/traffic_line/traffic_line.cc create mode 100644 cmd/traffic_top/Makefile.am create mode 100644 cmd/traffic_top/Makefile.in create mode 100644 cmd/traffic_top/README create mode 100644 cmd/traffic_top/stats.h create mode 100644 cmd/traffic_top/traffic_top.cc create mode 100644 config.layout create mode 100755 configure create mode 100644 configure.ac create mode 100644 contrib/install_trafficserver.sh create mode 100755 contrib/make-ssl-multicert create mode 100644 contrib/manifests/debian.pp create mode 100644 contrib/manifests/omnios.sh create mode 100644 contrib/manifests/redhat.pp create mode 100644 contrib/python/compare_RecordsConfigcc.py create mode 100644 contrib/python/compare_records_config.py create mode 100644 contrib/set_trafficserver.sh create mode 100644 doc/Doxyfile.in create mode 100644 doc/Makefile.am create mode 100644 doc/Makefile.in create mode 100644 doc/admin/cluster-howto.en.rst create mode 100644 doc/admin/configuring-cache.en.rst create mode 100644 doc/admin/configuring-traffic-server.en.rst create mode 100644 doc/admin/event-logging-formats.en.rst create mode 100644 doc/admin/explicit-proxy-caching.en.rst create mode 100644 doc/admin/faqs.en.rst create mode 100644 doc/admin/forward-proxy.en.rst create mode 100644 doc/admin/getting-started.en.rst create mode 100644 
doc/admin/hierachical-caching.en.rst create mode 100644 doc/admin/http-proxy-caching.en.rst create mode 100644 doc/admin/index.en.rst create mode 100644 doc/admin/monitoring-traffic.en.rst create mode 100644 doc/admin/performance-tuning.en.rst create mode 100644 doc/admin/reverse-proxy-http-redirects.en.rst create mode 100644 doc/admin/security-options.en.rst create mode 100644 doc/admin/session-protocol.en.rst create mode 100644 doc/admin/traffic-server-error-messages.en.rst create mode 100644 doc/admin/transparent-proxy.en.rst create mode 100644 doc/admin/transparent-proxy/bridge.en.rst create mode 100644 doc/admin/transparent-proxy/build.en.rst create mode 100644 doc/admin/transparent-proxy/router-inline.en.rst create mode 100644 doc/admin/transparent-proxy/wccp-configuration.en.rst create mode 100644 doc/admin/working-log-files.en.rst create mode 100644 doc/arch/cache/cache-api.en.rst create mode 100644 doc/arch/cache/cache-appendix.en.rst create mode 100644 doc/arch/cache/cache-arch.en.rst create mode 100644 doc/arch/cache/cache-data-structures.en.rst create mode 100644 doc/arch/cache/cache.en.rst create mode 100644 doc/arch/cache/common.defs create mode 100644 doc/arch/cache/images/ats-cache-volume-definition.png create mode 100644 doc/arch/cache/images/ats-cache-volume-directory.png create mode 100644 doc/arch/cache/images/ats-cache-volume-layout.png create mode 100644 doc/arch/cache/images/ats-cache-write-cursor.png create mode 100644 doc/arch/cache/images/cache-directory-structure.png create mode 100644 doc/arch/cache/images/cache-doc-layout-3-2-0.png create mode 100644 doc/arch/cache/images/cache-doc-layout-4-0-1.png create mode 100644 doc/arch/cache/images/cache-multi-fragment.png create mode 100644 doc/arch/cache/images/cache-span-layout.png create mode 100644 doc/arch/cache/images/cache-spans.png create mode 100644 doc/arch/cache/images/cache-stripe-layout.png create mode 100644 doc/arch/cache/images/dir-bucket-assign.png create mode 100644 
doc/arch/cache/images/dir-segment-bucket.png create mode 100644 doc/arch/cache/ram-cache.en.rst create mode 100644 doc/arch/cache/tier-storage.en.rst create mode 100644 doc/arch/hacking/config-var-impl.en.rst create mode 100644 doc/arch/hacking/index.en.rst create mode 100644 doc/arch/index.en.rst create mode 100644 doc/arch/proposals/hostdb.en.rst create mode 100644 doc/checkvers.py create mode 100644 doc/conf.py create mode 100644 doc/doap.rdf create mode 100644 doc/dot/ResponseDiag.dot create mode 100644 doc/dot/SimpleStateDiag.dot create mode 100644 doc/dot/SimpleStateDiagAPI.dot create mode 100644 doc/ext/traffic-server.py create mode 100644 doc/glossary.en.rst create mode 100644 doc/index.rst create mode 100644 doc/locale/ja/LC_MESSAGES/admin/cluster-howto.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/admin/configuring-cache.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/admin/configuring-traffic-server.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/admin/event-logging-formats.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/admin/explicit-proxy-caching.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/admin/faqs.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/admin/forward-proxy.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/admin/getting-started.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/admin/hierachical-caching.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/admin/http-proxy-caching.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/admin/index.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/admin/monitoring-traffic.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/admin/performance-tuning.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/admin/reverse-proxy-http-redirects.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/admin/security-options.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/admin/traffic-server-error-messages.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/admin/transparent-proxy.en.po create 
mode 100644 doc/locale/ja/LC_MESSAGES/admin/transparent-proxy/bridge.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/admin/transparent-proxy/build.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/admin/transparent-proxy/router-inline.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/admin/transparent-proxy/wccp-configuration.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/admin/working-log-files.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/admin/working-log-files/log-formats.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/arch/cache/cache-api.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/arch/cache/cache-appendix.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/arch/cache/cache-arch.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/arch/cache/cache-data-structures.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/arch/cache/cache.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/arch/cache/ram-cache.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/arch/cache/tier-storage.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/arch/hacking/index.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/arch/index.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/arch/proposals/hostdb.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/glossary.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/index.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/api/TSAPI.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/api/TSDebug.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/api/TSHttpHookAdd.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/api/TSHttpOverridableConfig.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/api/TSHttpParserCreate.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/api/TSHttpTxnMilestoneGet.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/api/TSIOBufferCreate.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/api/TSInstallDirGet.en.po create mode 
100644 doc/locale/ja/LC_MESSAGES/reference/api/TSLifecycleHookAdd.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/api/TSMBufferCreate.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/api/TSMimeHdrFieldValueStringGet.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/api/TSPluginInit.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/api/TSRemap.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/api/TSTrafficServerVersionGet.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/api/TSTypes.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/api/TSUrlCreate.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/api/TSUrlHostGet.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/api/TSUrlHostSet.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/api/TSUrlPercentEncode.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/api/TSUrlStringGet.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/api/TSmalloc.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/api/index.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/commands/index.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/commands/traffic_cop.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/commands/traffic_line.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/commands/traffic_logcat.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/commands/traffic_logstats.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/commands/traffic_manager.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/commands/traffic_server.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/commands/traffic_shell.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/commands/traffic_top.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/commands/tspush.en.po create mode 100644 
doc/locale/ja/LC_MESSAGES/reference/commands/tstop.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/commands/tsxs.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/configuration/cache.config.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/configuration/congestion.config.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/configuration/hosting.config.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/configuration/icp.config.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/configuration/index.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/configuration/ip_allow.config.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/configuration/log_hosts.config.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/configuration/logs_xml.config.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/configuration/parent.config.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/configuration/plugin.config.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/configuration/records.config.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/configuration/remap.config.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/configuration/splitdns.config.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/configuration/ssl_multicert.config.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/configuration/storage.config.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/configuration/update.config.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/configuration/volume.config.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/plugins/authproxy.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/plugins/balancer.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/plugins/buffer_upload.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/plugins/cacheurl.en.po create mode 100644 
doc/locale/ja/LC_MESSAGES/reference/plugins/combo_handler.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/plugins/conf_remap.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/plugins/esi.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/plugins/geoip_acl.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/plugins/gzip.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/plugins/header_filter.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/plugins/header_rewrite.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/plugins/hipes.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/plugins/index.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/plugins/metalink.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/plugins/mysql_remap.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/plugins/regex_remap.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/plugins/stale_while_revalidate.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/plugins/stats_over_http.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/plugins/ts_lua.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/reference/plugins/xdebug.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/actions-guide.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/actions-guide/hosts-lookup-api.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/adding-statistics.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/adding-statistics/coupled-statistics.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/adding-statistics/viewing-statistics-using-traffic-line.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/continuations.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/continuations/how-to-activate-continuations.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/continuations/writing-handler-functions.en.po create mode 100644 
doc/locale/ja/LC_MESSAGES/sdk/getting-started.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/getting-started/a-simple-plugin.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/getting-started/naming-conventions.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/getting-started/plugin-registration-and-version-checking.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/header-based-plugin-examples.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/header-based-plugin-examples/basic-authorization-plugin.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/header-based-plugin-examples/basic-authorization-plugin/implementing-the-handler-and-getting-a-handle-to-the-transaction.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/header-based-plugin-examples/basic-authorization-plugin/setting-a-transaction-hook.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/header-based-plugin-examples/basic-authorization-plugin/working-with-http-headers.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/header-based-plugin-examples/blacklist-plugin.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/header-based-plugin-examples/blacklist-plugin/accessing-the-transaction-being-processed.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/header-based-plugin-examples/blacklist-plugin/setting-a-global-hook.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/header-based-plugin-examples/blacklist-plugin/setting-up-a-transaction-hook.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/header-based-plugin-examples/blacklist-plugin/working-with-http-header-functions.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/how-to-create-trafficserver-plugins.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/how-to-create-trafficserver-plugins/roadmap-for-creating-plugins.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/http-headers.en.po create mode 100644 
doc/locale/ja/LC_MESSAGES/sdk/http-headers/guide-to-trafficserver-http-header-system.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/http-headers/guide-to-trafficserver-http-header-system/duplicate-mime-fields-are-not-coalesced.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/http-headers/guide-to-trafficserver-http-header-system/mime-fields-always-belong-to-an-associated-mime-header.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/http-headers/guide-to-trafficserver-http-header-system/release-marshal-buffer-handles.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/http-headers/http-headers.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/http-headers/marshal-buffers.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/http-headers/mime-headers.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/http-headers/urls.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/http-hooks-and-transactions.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/http-hooks-and-transactions/adding-hooks.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/http-hooks-and-transactions/http-alternate-selection.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/http-hooks-and-transactions/http-sessions.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/http-hooks-and-transactions/http-transactions.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/http-hooks-and-transactions/initiate-http-connection.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/http-hooks-and-transactions/intercepting-http-transactions.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/http-transformation-plugin.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/http-transformation-plugin/append-transform-plugin.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/http-transformation-plugin/sample-buffered-null-transformation-plugin.en.po create mode 100644 
doc/locale/ja/LC_MESSAGES/sdk/http-transformation-plugin/sample-null-transformation-plugin.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/index.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/io-guide.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/io-guide/guide-to-cache-api.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/io-guide/guide-to-cache-api/errors.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/io-guide/guide-to-cache-api/example.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/io-guide/guide-to-cache-api/how-to-do-a-cache-remove.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/io-guide/guide-to-cache-api/how-to-do-a-cache-write.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/io-guide/io-buffers.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/io-guide/net-vconnections.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/io-guide/transformations.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/io-guide/vios.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/misc-interface-guide.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/misc-interface-guide/memory-allocation.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/misc-interface-guide/thread-functions.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/misc-interface-guide/tsfopen-family.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/mutex-guide.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/new-protocol-plugins.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/plugin-configurations.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/plugin-management.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/plugin-management/guide-to-the-logging-api.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/plugin-management/reading-trafficserver-settings-and-statistics.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/preface.en.po create mode 100644 
doc/locale/ja/LC_MESSAGES/sdk/preface/how-to-use-this-book.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/preface/typographical-conventions.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/remap-plugin.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/remap-plugin/example-query-remap.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/sample-source-code.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/troubleshooting-tips.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/troubleshooting-tips/debugging-memory-leaks.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/troubleshooting-tips/unable-to-debug-tags.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/troubleshooting-tips/unable-to-load-plugins.en.po create mode 100644 doc/locale/ja/LC_MESSAGES/sdk/troubleshooting-tips/using-a-debugger.en.po create mode 100644 doc/mainpage.doc create mode 100644 doc/man/man_page_template.txt create mode 100644 doc/man/show_alarms.1 create mode 100644 doc/man/show_cache-stats.1 create mode 100644 doc/man/show_cache.1 create mode 100644 doc/man/show_cluster.1 create mode 100644 doc/man/show_dns-resolver.1 create mode 100644 doc/man/show_dns-stats.1 create mode 100644 doc/man/show_hostdb-stats.1 create mode 100644 doc/man/show_hostdb.1 create mode 100644 doc/man/show_http-stats.1 create mode 100644 doc/man/show_http-trans-stats.1 create mode 100644 doc/man/show_http.1 create mode 100644 doc/man/show_icp-stats.1 create mode 100644 doc/man/show_icp.1 create mode 100644 doc/man/show_logging-stats.1 create mode 100644 doc/man/show_logging.1 create mode 100644 doc/man/show_network.1 create mode 100644 doc/man/show_parent.1 create mode 100644 doc/man/show_port-tunnels.1 create mode 100644 doc/man/show_proxy-stats.1 create mode 100644 doc/man/show_proxy.1 create mode 100644 doc/man/show_remap.1 create mode 100644 doc/man/show_scheduled-update.1 create mode 100644 doc/man/show_security.1 create mode 100644 doc/man/show_socks.1 create mode 100644 
doc/man/show_ssl.1 create mode 100644 doc/man/show_status.1 create mode 100644 doc/man/show_version.1 create mode 100644 doc/man/show_virtual-ip.1 create mode 100644 doc/manpages.py create mode 100644 doc/pip_requirements.txt create mode 100644 doc/reference/api/TSAPI.en.rst create mode 100644 doc/reference/api/TSActionCancel.en.rst create mode 100644 doc/reference/api/TSActionDone.en.rst create mode 100644 doc/reference/api/TSCacheRead.en.rst create mode 100644 doc/reference/api/TSCacheRemove.en.rst create mode 100644 doc/reference/api/TSCacheWrite.en.rst create mode 100644 doc/reference/api/TSConfigDataGet.en.rst create mode 100644 doc/reference/api/TSConfigGet.en.rst create mode 100644 doc/reference/api/TSConfigRelease.en.rst create mode 100644 doc/reference/api/TSConfigSet.en.rst create mode 100644 doc/reference/api/TSContCall.en.rst create mode 100644 doc/reference/api/TSContCreate.en.rst create mode 100644 doc/reference/api/TSContDataGet.en.rst create mode 100644 doc/reference/api/TSContDataSet.en.rst create mode 100644 doc/reference/api/TSContDestroy.en.rst create mode 100644 doc/reference/api/TSContMutexGet.en.rst create mode 100644 doc/reference/api/TSContSchedule.en.rst create mode 100644 doc/reference/api/TSDebug.en.rst create mode 100644 doc/reference/api/TSHostLookup.en.rst create mode 100644 doc/reference/api/TSHostLookupResultAddrGet.en.rst create mode 100644 doc/reference/api/TSHttpConnect.en.rst create mode 100644 doc/reference/api/TSHttpHdrClone.en.rst create mode 100644 doc/reference/api/TSHttpHdrCopy.en.rst create mode 100644 doc/reference/api/TSHttpHdrCreate.en.rst create mode 100644 doc/reference/api/TSHttpHdrDestroy.en.rst create mode 100644 doc/reference/api/TSHttpHdrLengthGet.en.rst create mode 100644 doc/reference/api/TSHttpHdrMethodGet.en.rst create mode 100644 doc/reference/api/TSHttpHdrMethodSet.en.rst create mode 100644 doc/reference/api/TSHttpHdrPrint.en.rst create mode 100644 doc/reference/api/TSHttpHdrReasonGet.en.rst create mode 
100644 doc/reference/api/TSHttpHdrReasonLookup.en.rst create mode 100644 doc/reference/api/TSHttpHdrReasonSet.en.rst create mode 100644 doc/reference/api/TSHttpHdrStatusGet.en.rst create mode 100644 doc/reference/api/TSHttpHdrStatusSet.en.rst create mode 100644 doc/reference/api/TSHttpHdrTypeGet.en.rst create mode 100644 doc/reference/api/TSHttpHdrTypeSet.en.rst create mode 100644 doc/reference/api/TSHttpHdrUrlGet.en.rst create mode 100644 doc/reference/api/TSHttpHdrUrlSet.en.rst create mode 100644 doc/reference/api/TSHttpHdrVersionGet.en.rst create mode 100644 doc/reference/api/TSHttpHdrVersionSet.en.rst create mode 100644 doc/reference/api/TSHttpHookAdd.en.rst create mode 100644 doc/reference/api/TSHttpIsInternalRequest.en.rst create mode 100644 doc/reference/api/TSHttpOverridableConfig.en.rst create mode 100644 doc/reference/api/TSHttpParserCreate.en.rst create mode 100644 doc/reference/api/TSHttpSsnReenable.en.rst create mode 100644 doc/reference/api/TSHttpTxnCacheLookupStatusGet.en.rst create mode 100644 doc/reference/api/TSHttpTxnCacheLookupUrlGet.en.rst create mode 100644 doc/reference/api/TSHttpTxnCachedReqGet.en.rst create mode 100644 doc/reference/api/TSHttpTxnCachedRespGet.en.rst create mode 100644 doc/reference/api/TSHttpTxnClientReqGet.en.rst create mode 100644 doc/reference/api/TSHttpTxnClientRespGet.en.rst create mode 100644 doc/reference/api/TSHttpTxnErrorBodySet.en.rst create mode 100644 doc/reference/api/TSHttpTxnIntercept.en.rst create mode 100644 doc/reference/api/TSHttpTxnMilestoneGet.en.rst create mode 100644 doc/reference/api/TSHttpTxnNextHopAddrGet.en.rst create mode 100644 doc/reference/api/TSHttpTxnParentProxySet.en.rst create mode 100644 doc/reference/api/TSHttpTxnReenable.en.rst create mode 100644 doc/reference/api/TSHttpTxnServerAddrGet.en.rst create mode 100644 doc/reference/api/TSHttpTxnServerIntercept.en.rst create mode 100644 doc/reference/api/TSHttpTxnServerReqGet.en.rst create mode 100644 
doc/reference/api/TSHttpTxnServerRespGet.en.rst create mode 100644 doc/reference/api/TSHttpTxnSsnGet.en.rst create mode 100644 doc/reference/api/TSHttpTxnTransformRespGet.en.rst create mode 100644 doc/reference/api/TSHttpTxnTransformedRespCache.en.rst create mode 100644 doc/reference/api/TSHttpTxnUntransformedRespCache.en.rst create mode 100644 doc/reference/api/TSIOBufferBlockReadStart.en.rst create mode 100644 doc/reference/api/TSIOBufferCopy.en.rst create mode 100644 doc/reference/api/TSIOBufferCreate.en.rst create mode 100644 doc/reference/api/TSInstallDirGet.en.rst create mode 100644 doc/reference/api/TSLifecycleHookAdd.en.rst create mode 100644 doc/reference/api/TSMBufferCreate.en.rst create mode 100644 doc/reference/api/TSMgmtCounterGet.en.rst create mode 100644 doc/reference/api/TSMgmtFloatGet.en.rst create mode 100644 doc/reference/api/TSMgmtIntGet.en.rst create mode 100644 doc/reference/api/TSMgmtStringGet.en.rst create mode 100644 doc/reference/api/TSMgmtUpdateRegister.en.rst create mode 100644 doc/reference/api/TSMimeHdrClone.en.rst create mode 100644 doc/reference/api/TSMimeHdrCopy.en.rst create mode 100644 doc/reference/api/TSMimeHdrCreate.en.rst create mode 100644 doc/reference/api/TSMimeHdrDestroy.en.rst create mode 100644 doc/reference/api/TSMimeHdrFieldAppend.en.rst create mode 100644 doc/reference/api/TSMimeHdrFieldClone.en.rst create mode 100644 doc/reference/api/TSMimeHdrFieldCopy.en.rst create mode 100644 doc/reference/api/TSMimeHdrFieldCopyValues.en.rst create mode 100644 doc/reference/api/TSMimeHdrFieldCreate.en.rst create mode 100644 doc/reference/api/TSMimeHdrFieldDestroy.en.rst create mode 100644 doc/reference/api/TSMimeHdrFieldFind.en.rst create mode 100644 doc/reference/api/TSMimeHdrFieldGet.en.rst create mode 100644 doc/reference/api/TSMimeHdrFieldLengthGet.en.rst create mode 100644 doc/reference/api/TSMimeHdrFieldNameGet.en.rst create mode 100644 doc/reference/api/TSMimeHdrFieldNameSet.en.rst create mode 100644 
doc/reference/api/TSMimeHdrFieldNext.en.rst create mode 100644 doc/reference/api/TSMimeHdrFieldNextDup.en.rst create mode 100644 doc/reference/api/TSMimeHdrFieldRemove.en.rst create mode 100644 doc/reference/api/TSMimeHdrFieldValueAppend.en.rst create mode 100644 doc/reference/api/TSMimeHdrFieldValueDateInsert.en.rst create mode 100644 doc/reference/api/TSMimeHdrFieldValueDateSet.en.rst create mode 100644 doc/reference/api/TSMimeHdrFieldValueIntSet.en.rst create mode 100644 doc/reference/api/TSMimeHdrFieldValueStringGet.en.rst create mode 100644 doc/reference/api/TSMimeHdrFieldValueStringInsert.en.rst create mode 100644 doc/reference/api/TSMimeHdrFieldValueStringSet.en.rst create mode 100644 doc/reference/api/TSMimeHdrFieldValueUintInsert.en.rst create mode 100644 doc/reference/api/TSMimeHdrFieldValueUintSet.en.rst create mode 100644 doc/reference/api/TSMimeHdrFieldValuesClear.en.rst create mode 100644 doc/reference/api/TSMimeHdrFieldValuesCount.en.rst create mode 100644 doc/reference/api/TSMimeHdrFieldsClear.en.rst create mode 100644 doc/reference/api/TSMimeHdrFieldsCount.en.rst create mode 100644 doc/reference/api/TSMimeHdrLengthGet.en.rst create mode 100644 doc/reference/api/TSMimeHdrParse.en.rst create mode 100644 doc/reference/api/TSMimeHdrPrint.en.rst create mode 100644 doc/reference/api/TSMimeParserClear.en.rst create mode 100644 doc/reference/api/TSMimeParserCreate.en.rst create mode 100644 doc/reference/api/TSMimeParserDestroy.en.rst create mode 100644 doc/reference/api/TSMutexCreate.en.rst create mode 100644 doc/reference/api/TSMutexLock.en.rst create mode 100644 doc/reference/api/TSMutexLockTry.en.rst create mode 100644 doc/reference/api/TSMutexUnlock.en.rst create mode 100644 doc/reference/api/TSNetAccept.en.rst create mode 100644 doc/reference/api/TSNetAcceptNamedProtocol.en.rst create mode 100644 doc/reference/api/TSNetConnect.en.rst create mode 100644 doc/reference/api/TSPluginInit.en.rst create mode 100644 doc/reference/api/TSRemap.en.rst create 
mode 100644 doc/reference/api/TSTextLogObjectCreate.en.rst create mode 100644 doc/reference/api/TSThreadCreate.en.rst create mode 100644 doc/reference/api/TSThreadDestroy.en.rst create mode 100644 doc/reference/api/TSThreadInit.en.rst create mode 100644 doc/reference/api/TSThreadSelf.en.rst create mode 100644 doc/reference/api/TSTrafficServerVersionGet.en.rst create mode 100644 doc/reference/api/TSTransformCreate.en.rst create mode 100644 doc/reference/api/TSTransformOutputVConnGet.en.rst create mode 100644 doc/reference/api/TSTypes.en.rst create mode 100644 doc/reference/api/TSUrlCreate.en.rst create mode 100644 doc/reference/api/TSUrlDestroy.en.rst create mode 100644 doc/reference/api/TSUrlFtpTypeGet.en.rst create mode 100644 doc/reference/api/TSUrlFtpTypeSet.en.rst create mode 100644 doc/reference/api/TSUrlHostGet.en.rst create mode 100644 doc/reference/api/TSUrlHostSet.en.rst create mode 100644 doc/reference/api/TSUrlPercentEncode.en.rst create mode 100644 doc/reference/api/TSUrlStringGet.en.rst create mode 100644 doc/reference/api/TSVConnAbort.en.rst create mode 100644 doc/reference/api/TSVConnCacheObjectSizeGet.en.rst create mode 100644 doc/reference/api/TSVConnClose.en.rst create mode 100644 doc/reference/api/TSVConnClosedGet.en.rst create mode 100644 doc/reference/api/TSVConnFdCreate.en.rst create mode 100644 doc/reference/api/TSVConnRead.en.rst create mode 100644 doc/reference/api/TSVConnReadVIOGet.en.rst create mode 100644 doc/reference/api/TSVConnShutdown.en.rst create mode 100644 doc/reference/api/TSVConnWrite.en.rst create mode 100644 doc/reference/api/TSVConnWriteVIOGet.en.rst create mode 100644 doc/reference/api/TSVIOBufferGet.en.rst create mode 100644 doc/reference/api/TSVIOContGet.en.rst create mode 100644 doc/reference/api/TSVIOMutexGet.en.rst create mode 100644 doc/reference/api/TSVIONBytesGet.en.rst create mode 100644 doc/reference/api/TSVIONBytesSet.en.rst create mode 100644 doc/reference/api/TSVIONDoneGet.en.rst create mode 100644 
doc/reference/api/TSVIONDoneSet.en.rst create mode 100644 doc/reference/api/TSVIONTodoGet.en.rst create mode 100644 doc/reference/api/TSVIOReaderGet.en.rst create mode 100644 doc/reference/api/TSVIOReenable.en.rst create mode 100644 doc/reference/api/TSVIOVConnGet.en.rst create mode 100644 doc/reference/api/TSfclose.en.rst create mode 100644 doc/reference/api/TSfflush.en.rst create mode 100644 doc/reference/api/TSfgets.en.rst create mode 100644 doc/reference/api/TSfopen.en.rst create mode 100644 doc/reference/api/TSfread.en.rst create mode 100644 doc/reference/api/TSfwrite.en.rst create mode 100644 doc/reference/api/TSmalloc.en.rst create mode 100644 doc/reference/api/index.en.rst create mode 100644 doc/reference/commands/index.en.rst create mode 100644 doc/reference/commands/traffic_cop.en.rst create mode 100644 doc/reference/commands/traffic_line.en.rst create mode 100644 doc/reference/commands/traffic_logcat.en.rst create mode 100644 doc/reference/commands/traffic_logstats.en.rst create mode 100644 doc/reference/commands/traffic_manager.en.rst create mode 100644 doc/reference/commands/traffic_server.en.rst create mode 100644 doc/reference/commands/traffic_shell.en.rst create mode 100644 doc/reference/commands/traffic_top.en.rst create mode 100644 doc/reference/commands/tspush.en.rst create mode 100644 doc/reference/commands/tsxs.en.rst create mode 100644 doc/reference/configuration/cache.config.en.rst create mode 100644 doc/reference/configuration/congestion.config.en.rst create mode 100644 doc/reference/configuration/hosting.config.en.rst create mode 100644 doc/reference/configuration/icp.config.en.rst create mode 100644 doc/reference/configuration/index.en.rst create mode 100644 doc/reference/configuration/ip_allow.config.en.rst create mode 100644 doc/reference/configuration/log_hosts.config.en.rst create mode 100644 doc/reference/configuration/logs_xml.config.en.rst create mode 100644 doc/reference/configuration/parent.config.en.rst create mode 100644 
doc/reference/configuration/plugin.config.en.rst create mode 100644 doc/reference/configuration/records.config.en.rst create mode 100644 doc/reference/configuration/remap.config.en.rst create mode 100644 doc/reference/configuration/splitdns.config.en.rst create mode 100644 doc/reference/configuration/ssl_multicert.config.en.rst create mode 100644 doc/reference/configuration/storage.config.en.rst create mode 100644 doc/reference/configuration/update.config.en.rst create mode 100644 doc/reference/configuration/volume.config.en.rst create mode 100644 doc/reference/plugins/authproxy.en.rst create mode 100644 doc/reference/plugins/background_fetch.en.rst create mode 100644 doc/reference/plugins/balancer.en.rst create mode 100644 doc/reference/plugins/buffer_upload.en.rst create mode 100644 doc/reference/plugins/cacheurl.en.rst create mode 100644 doc/reference/plugins/combo_handler.en.rst create mode 100644 doc/reference/plugins/conf_remap.en.rst create mode 100644 doc/reference/plugins/esi.en.rst create mode 100644 doc/reference/plugins/geoip_acl.en.rst create mode 100644 doc/reference/plugins/gzip.en.rst create mode 100644 doc/reference/plugins/header_rewrite.en.rst create mode 100644 doc/reference/plugins/hipes.en.rst create mode 100644 doc/reference/plugins/index.en.rst create mode 100644 doc/reference/plugins/metalink.en.rst create mode 100644 doc/reference/plugins/mysql_remap.en.rst create mode 100644 doc/reference/plugins/regex_remap.en.rst create mode 100644 doc/reference/plugins/s3_auth.en.rst create mode 100644 doc/reference/plugins/stale_while_revalidate.en.rst create mode 100644 doc/reference/plugins/stats_over_http.en.rst create mode 100644 doc/reference/plugins/tcpinfo.en.rst create mode 100644 doc/reference/plugins/ts_lua.en.rst create mode 100644 doc/reference/plugins/xdebug.en.rst create mode 100755 doc/sbuild create mode 100644 doc/sdk/actions-guide.en.rst create mode 100644 doc/sdk/actions-guide/hosts-lookup-api.en.rst create mode 100644 
doc/sdk/adding-statistics.en.rst create mode 100644 doc/sdk/adding-statistics/coupled-statistics.en.rst create mode 100644 doc/sdk/adding-statistics/viewing-statistics-using-traffic-line.en.rst create mode 100644 doc/sdk/continuations.en.rst create mode 100644 doc/sdk/continuations/how-to-activate-continuations.en.rst create mode 100644 doc/sdk/continuations/writing-handler-functions.en.rst create mode 100644 doc/sdk/getting-started.en.rst create mode 100644 doc/sdk/getting-started/a-simple-plugin.en.rst create mode 100644 doc/sdk/getting-started/naming-conventions.en.rst create mode 100644 doc/sdk/getting-started/plugin-registration-and-version-checking.en.rst create mode 100644 doc/sdk/header-based-plugin-examples.en.rst create mode 100644 doc/sdk/header-based-plugin-examples/basic-authorization-plugin.en.rst create mode 100644 doc/sdk/header-based-plugin-examples/basic-authorization-plugin/implementing-the-handler-and-getting-a-handle-to-the-transaction.en.rst create mode 100644 doc/sdk/header-based-plugin-examples/basic-authorization-plugin/setting-a-transaction-hook.en.rst create mode 100644 doc/sdk/header-based-plugin-examples/basic-authorization-plugin/working-with-http-headers.en.rst create mode 100644 doc/sdk/header-based-plugin-examples/blacklist-plugin.en.rst create mode 100644 doc/sdk/header-based-plugin-examples/blacklist-plugin/accessing-the-transaction-being-processed.en.rst create mode 100644 doc/sdk/header-based-plugin-examples/blacklist-plugin/setting-a-global-hook.en.rst create mode 100644 doc/sdk/header-based-plugin-examples/blacklist-plugin/setting-up-a-transaction-hook.en.rst create mode 100644 doc/sdk/header-based-plugin-examples/blacklist-plugin/working-with-http-header-functions.en.rst create mode 100644 doc/sdk/how-to-create-trafficserver-plugins.en.rst create mode 100644 doc/sdk/how-to-create-trafficserver-plugins/roadmap-for-creating-plugins.en.rst create mode 100644 doc/sdk/http-headers.en.rst create mode 100644 
doc/sdk/http-headers/guide-to-trafficserver-http-header-system.en.rst create mode 100644 doc/sdk/http-headers/guide-to-trafficserver-http-header-system/duplicate-mime-fields-are-not-coalesced.en.rst create mode 100644 doc/sdk/http-headers/guide-to-trafficserver-http-header-system/mime-fields-always-belong-to-an-associated-mime-header.en.rst create mode 100644 doc/sdk/http-headers/guide-to-trafficserver-http-header-system/release-marshal-buffer-handles.en.rst create mode 100644 doc/sdk/http-headers/http-headers.en.rst create mode 100644 doc/sdk/http-headers/marshal-buffers.en.rst create mode 100644 doc/sdk/http-headers/mime-headers.en.rst create mode 100644 doc/sdk/http-headers/urls.en.rst create mode 100644 doc/sdk/http-hooks-and-transactions.en.rst create mode 100644 doc/sdk/http-hooks-and-transactions/adding-hooks.en.rst create mode 100644 doc/sdk/http-hooks-and-transactions/http-alternate-selection.en.rst create mode 100644 doc/sdk/http-hooks-and-transactions/http-sessions.en.rst create mode 100644 doc/sdk/http-hooks-and-transactions/http-transactions.en.rst create mode 100644 doc/sdk/http-hooks-and-transactions/initiate-http-connection.en.rst create mode 100644 doc/sdk/http-hooks-and-transactions/intercepting-http-transactions.en.rst create mode 100644 doc/sdk/http-transformation-plugin.en.rst create mode 100644 doc/sdk/http-transformation-plugin/append-transform-plugin.en.rst create mode 100644 doc/sdk/http-transformation-plugin/sample-buffered-null-transformation-plugin.en.rst create mode 100644 doc/sdk/http-transformation-plugin/sample-null-transformation-plugin.en.rst create mode 100644 doc/sdk/index.en.rst create mode 100644 doc/sdk/io-guide.en.rst create mode 100644 doc/sdk/io-guide/guide-to-cache-api.en.rst create mode 100644 doc/sdk/io-guide/guide-to-cache-api/errors.en.rst create mode 100644 doc/sdk/io-guide/guide-to-cache-api/example.en.rst create mode 100644 doc/sdk/io-guide/guide-to-cache-api/how-to-do-a-cache-remove.en.rst create mode 100644 
doc/sdk/io-guide/guide-to-cache-api/how-to-do-a-cache-write.en.rst create mode 100644 doc/sdk/io-guide/io-buffers.en.rst create mode 100644 doc/sdk/io-guide/net-vconnections.en.rst create mode 100644 doc/sdk/io-guide/transformations.en.rst create mode 100644 doc/sdk/io-guide/vios.en.rst create mode 100644 doc/sdk/misc-interface-guide.en.rst create mode 100644 doc/sdk/misc-interface-guide/memory-allocation.en.rst create mode 100644 doc/sdk/misc-interface-guide/thread-functions.en.rst create mode 100644 doc/sdk/misc-interface-guide/tsfopen-family.en.rst create mode 100644 doc/sdk/mutex-guide.en.rst create mode 100644 doc/sdk/new-protocol-plugins.en.rst create mode 100644 doc/sdk/plugin-configurations.en.rst create mode 100644 doc/sdk/plugin-management.en.rst create mode 100644 doc/sdk/plugin-management/guide-to-the-logging-api.en.rst create mode 100644 doc/sdk/plugin-management/reading-trafficserver-settings-and-statistics.en.rst create mode 100644 doc/sdk/preface.en.rst create mode 100644 doc/sdk/preface/how-to-use-this-book.en.rst create mode 100644 doc/sdk/preface/typographical-conventions.en.rst create mode 100644 doc/sdk/remap-plugin.en.rst create mode 100644 doc/sdk/remap-plugin/example-query-remap.en.rst create mode 100644 doc/sdk/sample-source-code.en.rst create mode 100755 doc/sdk/skeleton create mode 100644 doc/sdk/troubleshooting-tips.en.rst create mode 100644 doc/sdk/troubleshooting-tips/debugging-memory-leaks.en.rst create mode 100644 doc/sdk/troubleshooting-tips/unable-to-debug-tags.en.rst create mode 100644 doc/sdk/troubleshooting-tips/unable-to-load-plugins.en.rst create mode 100644 doc/sdk/troubleshooting-tips/using-a-debugger.en.rst create mode 100644 doc/static/images/admin/alarm.jpg create mode 100644 doc/static/images/admin/ats-basic-traffic.png create mode 100644 doc/static/images/admin/ats-traffic-bridge.png create mode 100644 doc/static/images/admin/cache_hit.jpg create mode 100644 doc/static/images/admin/cache_miss.jpg create mode 100644 
doc/static/images/admin/cachehrc.jpg create mode 100644 doc/static/images/admin/configur.jpg create mode 100644 doc/static/images/admin/dash.jpg create mode 100644 doc/static/images/admin/dash3.jpg create mode 100644 doc/static/images/admin/firewall.jpg create mode 100644 doc/static/images/admin/httprvs.jpg create mode 100644 doc/static/images/admin/logcolat.jpg create mode 100644 doc/static/images/admin/monitor.jpg create mode 100644 doc/static/images/admin/netscape_common_format.jpg create mode 100644 doc/static/images/admin/netscape_extended2_format.jpg create mode 100644 doc/static/images/admin/netscape_extended_format.jpg create mode 100644 doc/static/images/admin/process.jpg create mode 100644 doc/static/images/admin/records.jpg create mode 100644 doc/static/images/admin/revproxy.jpg create mode 100644 doc/static/images/admin/squid_format.jpg create mode 100644 doc/static/images/admin/ssl_c.jpg create mode 100644 doc/static/images/admin/ssl_os.jpg create mode 100644 doc/static/images/admin/stat.jpg create mode 100644 doc/static/images/admin/summary-mixt.log_file.jpg create mode 100644 doc/static/images/admin/wccp-shared-outer.png create mode 100644 doc/static/images/companies/akamai.png create mode 100644 doc/static/images/companies/betfair.png create mode 100644 doc/static/images/companies/brainswear.png create mode 100644 doc/static/images/companies/comcast.png create mode 100644 doc/static/images/companies/devaus.png create mode 100644 doc/static/images/companies/frisch.png create mode 100644 doc/static/images/companies/godaddy.png create mode 100644 doc/static/images/companies/guruve.png create mode 100644 doc/static/images/companies/internode.png create mode 100644 doc/static/images/companies/linkedin.png create mode 100644 doc/static/images/companies/omniti.png create mode 100644 doc/static/images/companies/powerhttp.png create mode 100644 doc/static/images/companies/sic.png create mode 100644 doc/static/images/companies/suomen.png create mode 100644 
doc/static/images/companies/taobao.png create mode 100644 doc/static/images/companies/vng.png create mode 100644 doc/static/images/companies/yahoo.png create mode 100644 doc/static/images/docbook/caution.png create mode 100644 doc/static/images/docbook/important.png create mode 100644 doc/static/images/docbook/note.png create mode 100644 doc/static/images/docbook/tip.png create mode 100644 doc/static/images/docbook/ts75.png create mode 100644 doc/static/images/download_background.png create mode 100644 doc/static/images/download_ts.png create mode 100644 doc/static/images/favicon.ico create mode 100644 doc/static/images/grey_paper.jpg create mode 100644 doc/static/images/press/trafficserver.svg create mode 100644 doc/static/images/press/ts_logo_tm_1024x196.png create mode 100644 doc/static/images/press/ts_logo_tm_1960x375.png create mode 100644 doc/static/images/press/ts_logo_tm_380x73.png create mode 100644 doc/static/images/press/tsicon.png create mode 100644 doc/static/images/press/tsicon_large.png create mode 100644 doc/static/images/press/tsicon_small.png create mode 100644 doc/static/images/sdk/Protocol_state_diagram.jpg create mode 100644 doc/static/images/sdk/Uses.jpg create mode 100644 doc/static/images/sdk/blacklist75.jpg create mode 100644 doc/static/images/sdk/event_sys80.jpg create mode 100644 doc/static/images/sdk/evt_plugin120.jpg create mode 100644 doc/static/images/sdk/http_header_struct.jpg create mode 100644 doc/static/images/sdk/http_headers.jpg create mode 100644 doc/static/images/sdk/http_state2.jpg create mode 100644 doc/static/images/sdk/marshall_buffers.jpg create mode 100644 doc/static/images/sdk/plugin_process.jpg create mode 100644 doc/static/images/sdk/protocol_evt.jpg create mode 100644 doc/static/images/sdk/protocol_sm_big.jpg create mode 100644 doc/static/images/sdk/transact75.jpg create mode 100644 doc/static/images/sdk/transact_hook75.jpg create mode 100644 doc/static/images/sdk/txn_sm.jpg create mode 100644 
doc/static/images/sdk/vconn_buf_output.jpg create mode 100644 doc/static/images/sdk/vconn_buffer.jpg create mode 100644 doc/static/images/sdk/vconnection.jpg create mode 100644 doc/static/images/sdk/vconnection1.jpg create mode 100644 doc/static/images/sdk/vconnection2.jpg create mode 100644 doc/static/images/trans_logo_148x132.png create mode 100644 doc/static/images/trans_logo_150x30.gif create mode 100644 doc/static/images/trans_logo_150x30.png create mode 100644 doc/static/images/trans_logo_150x32.png create mode 100644 doc/static/images/trans_logo_160x35.png create mode 100644 doc/static/images/trans_logo_250x49.gif create mode 100644 doc/static/images/trans_logo_250x49.png create mode 100644 doc/static/images/trans_logo_350x69.gif create mode 100644 doc/static/images/trans_logo_350x69.png create mode 100644 doc/static/images/trans_logo_420x95.png create mode 100644 doc/static/images/trans_logo_603x132.png create mode 100644 doc/static/images/trans_logo_tm_380x69.png create mode 100644 doc/static/images/ts75.png create mode 100644 doc/static/images/warning.png create mode 100644 emacs-style create mode 100644 example/Makefile.am create mode 100644 example/Makefile.in create mode 100644 example/add-header/add-header.c create mode 100644 example/add-header/readme.txt create mode 100644 example/app-template/app-template.cc create mode 100644 example/app-template/app-template.h create mode 100644 example/app-template/records.config.in create mode 100644 example/append-transform/append-transform.c create mode 100644 example/append-transform/readme.txt create mode 100644 example/basic-auth/basic-auth.c create mode 100644 example/basic-auth/readme.txt create mode 100644 example/blacklist-0/blacklist-0.c create mode 100644 example/blacklist-1/blacklist-1.c create mode 100644 example/blacklist-1/blacklist.txt create mode 100644 example/blacklist-1/readme.txt create mode 100644 example/bnull-transform/bnull-transform.c create mode 100644 example/cache-scan/cache-scan.cc 
create mode 100644 example/file-1/file-1.c create mode 100644 example/file-1/readme.txt create mode 100644 example/hello/hello.c create mode 100644 example/intercept/intercept.cc create mode 100644 example/lifecycle-plugin/lifecycle-plugin.c create mode 100644 example/lifecycle-plugin/readme.txt create mode 100644 example/null-transform/null-transform.c create mode 100644 example/null-transform/readme.txt create mode 100644 example/output-header/output-header.c create mode 100644 example/output-header/readme create mode 100644 example/prefetch/prefetch-plugin-eg1.c create mode 100644 example/prefetch/readme.txt create mode 100644 example/prefetch/test-hns-plugin.c create mode 100644 example/protocol/Protocol.c create mode 100644 example/protocol/Protocol.h create mode 100644 example/protocol/README.txt create mode 100644 example/protocol/TxnSM.c create mode 100644 example/protocol/TxnSM.h create mode 100644 example/protocol/test/ProtocolClientTest.java create mode 100644 example/protocol/test/ProtocolServerTest.java create mode 100644 example/protocol/test/file_gen.sh create mode 100644 example/query-remap/query-remap.c create mode 100644 example/redirect-1/readme.txt create mode 100644 example/redirect-1/redirect-1.c create mode 100644 example/remap/build.txt create mode 100644 example/remap/remap.cc create mode 100644 example/replace-header/replace-header.c create mode 100644 example/response-header-1/response-header-1.c create mode 100644 example/secure-link/readme.txt create mode 100644 example/secure-link/secure-link.c create mode 100644 example/server-transform/server-transform.c create mode 100644 example/session-1/session-1.c create mode 100644 example/thread-1/readme.txt create mode 100644 example/thread-1/thread-1.c create mode 100644 example/thread-pool/README.txt create mode 100644 example/thread-pool/TESTPLAN.txt create mode 100644 example/thread-pool/include/Makefile.am create mode 100644 example/thread-pool/include/gen.c create mode 100644 
example/thread-pool/include/gen_inc.sh create mode 100644 example/thread-pool/psi.c create mode 100644 example/thread-pool/test/SDKTest/SDKtest_server.config create mode 100644 example/thread-pool/test/SDKTest/psi_server.c create mode 100644 example/thread-pool/test/SynTest/Tests/Psi/1.cfg create mode 100644 example/thread-pool/test/SynTest/Tests/Psi/10.cfg create mode 100644 example/thread-pool/test/SynTest/Tests/Psi/11.cfg create mode 100644 example/thread-pool/test/SynTest/Tests/Psi/12.cfg create mode 100644 example/thread-pool/test/SynTest/Tests/Psi/13.cfg create mode 100644 example/thread-pool/test/SynTest/Tests/Psi/2.cfg create mode 100644 example/thread-pool/test/SynTest/Tests/Psi/3.cfg create mode 100644 example/thread-pool/test/SynTest/Tests/Psi/4.cfg create mode 100644 example/thread-pool/test/SynTest/Tests/Psi/5.cfg create mode 100644 example/thread-pool/test/SynTest/Tests/Psi/6.cfg create mode 100644 example/thread-pool/test/SynTest/Tests/Psi/7.cfg create mode 100644 example/thread-pool/test/SynTest/Tests/Psi/8.cfg create mode 100644 example/thread-pool/test/SynTest/Tests/Psi/9.cfg create mode 100644 example/thread-pool/test/SynTest/Tests/Psi/psi_files/tc10_file.txt create mode 100644 example/thread-pool/test/SynTest/Tests/Psi/psi_files/tc11_file.txt create mode 100644 example/thread-pool/test/SynTest/Tests/Psi/psi_files/tc12_file.txt create mode 100644 example/thread-pool/test/SynTest/Tests/Psi/psi_files/tc13_file.txt create mode 100644 example/thread-pool/test/SynTest/Tests/Psi/psi_files/tc1_file.txt create mode 100644 example/thread-pool/test/SynTest/Tests/Psi/psi_files/tc2_file.txt create mode 100644 example/thread-pool/test/SynTest/Tests/Psi/psi_files/tc3_file.txt create mode 100644 example/thread-pool/test/SynTest/Tests/Psi/psi_files/tc4_file.txt create mode 100644 example/thread-pool/test/SynTest/Tests/Psi/psi_files/tc5_file.txt create mode 100644 example/thread-pool/test/SynTest/Tests/Psi/psi_files/tc6_file.txt create mode 100644 
example/thread-pool/test/SynTest/Tests/Psi/psi_files/tc7_file.txt create mode 100644 example/thread-pool/test/SynTest/Tests/Psi/psi_files/tc8_file.txt create mode 100644 example/thread-pool/test/SynTest/Tests/Psi/psi_files/tc9_file.txt create mode 100644 example/thread-pool/test/SynTest/system.cfg create mode 100644 example/thread-pool/test/SynTest/tests_psi.cfg create mode 100644 example/thread-pool/thread.c create mode 100644 example/thread-pool/thread.h create mode 100644 example/version/version.c create mode 100644 iocore/Makefile.am create mode 100644 iocore/Makefile.in create mode 100644 iocore/aio/AIO.cc create mode 100644 iocore/aio/I_AIO.h create mode 100644 iocore/aio/Inline.cc create mode 100644 iocore/aio/Makefile.am create mode 100644 iocore/aio/Makefile.in create mode 100644 iocore/aio/P_AIO.h create mode 100644 iocore/aio/sample.cfg create mode 100644 iocore/aio/test_AIO.cc create mode 100755 iocore/aio/test_AIO.sample create mode 100644 iocore/cache/Cache.cc create mode 100644 iocore/cache/CacheDir.cc create mode 100644 iocore/cache/CacheDisk.cc create mode 100644 iocore/cache/CacheHosting.cc create mode 100644 iocore/cache/CacheHttp.cc create mode 100644 iocore/cache/CacheLink.cc create mode 100644 iocore/cache/CachePages.cc create mode 100644 iocore/cache/CachePagesInternal.cc create mode 100644 iocore/cache/CacheRead.cc create mode 100644 iocore/cache/CacheTest.cc create mode 100644 iocore/cache/CacheVol.cc create mode 100644 iocore/cache/CacheWrite.cc create mode 100644 iocore/cache/I_Cache.h create mode 100644 iocore/cache/I_CacheDefs.h create mode 100644 iocore/cache/I_Store.h create mode 100644 iocore/cache/Inline.cc create mode 100644 iocore/cache/Makefile.am create mode 100644 iocore/cache/Makefile.in create mode 100644 iocore/cache/Notes create mode 100644 iocore/cache/P_Cache.h create mode 100644 iocore/cache/P_CacheArray.h create mode 100644 iocore/cache/P_CacheDir.h create mode 100644 iocore/cache/P_CacheDisk.h create mode 100644 
iocore/cache/P_CacheHosting.h create mode 100644 iocore/cache/P_CacheHttp.h create mode 100644 iocore/cache/P_CacheInternal.h create mode 100644 iocore/cache/P_CacheTest.h create mode 100644 iocore/cache/P_CacheVol.h create mode 100644 iocore/cache/P_RamCache.h create mode 100644 iocore/cache/RamCacheCLFUS.cc create mode 100644 iocore/cache/RamCacheLRU.cc create mode 100644 iocore/cache/Store.cc create mode 100644 iocore/cluster/ClusterAPI.cc create mode 100644 iocore/cluster/ClusterCache.cc create mode 100644 iocore/cluster/ClusterConfig.cc create mode 100644 iocore/cluster/ClusterHandler.cc create mode 100644 iocore/cluster/ClusterHandlerBase.cc create mode 100644 iocore/cluster/ClusterHash.cc create mode 100644 iocore/cluster/ClusterLib.cc create mode 100644 iocore/cluster/ClusterLoadMonitor.cc create mode 100644 iocore/cluster/ClusterMachine.cc create mode 100644 iocore/cluster/ClusterProcessor.cc create mode 100644 iocore/cluster/ClusterRPC.cc create mode 100644 iocore/cluster/ClusterVConnection.cc create mode 100644 iocore/cluster/Inline.cc create mode 100644 iocore/cluster/Makefile.am create mode 100644 iocore/cluster/Makefile.in create mode 100644 iocore/cluster/P_Cluster.h create mode 100644 iocore/cluster/P_ClusterCache.h create mode 100644 iocore/cluster/P_ClusterCacheInternal.h create mode 100644 iocore/cluster/P_ClusterHandler.h create mode 100644 iocore/cluster/P_ClusterInline.h create mode 100644 iocore/cluster/P_ClusterInternal.h create mode 100644 iocore/cluster/P_ClusterLib.h create mode 100644 iocore/cluster/P_ClusterLoadMonitor.h create mode 100644 iocore/cluster/P_ClusterMachine.h create mode 100644 iocore/cluster/P_TimeTrace.h create mode 100644 iocore/cluster/test_I_Cluster.cc create mode 100644 iocore/cluster/test_P_Cluster.cc create mode 100644 iocore/dns/DNS.cc create mode 100644 iocore/dns/DNSConnection.cc create mode 100644 iocore/dns/I_DNS.h create mode 100644 iocore/dns/I_DNSProcessor.h create mode 100644 iocore/dns/I_SplitDNS.h create 
mode 100644 iocore/dns/I_SplitDNSProcessor.h create mode 100644 iocore/dns/Inline.cc create mode 100644 iocore/dns/Makefile.am create mode 100644 iocore/dns/Makefile.in create mode 100644 iocore/dns/P_DNS.h create mode 100644 iocore/dns/P_DNSConnection.h create mode 100644 iocore/dns/P_DNSProcessor.h create mode 100644 iocore/dns/P_SplitDNS.h create mode 100644 iocore/dns/P_SplitDNSProcessor.h create mode 100644 iocore/dns/SRV.h create mode 100644 iocore/dns/SplitDNS.cc create mode 100644 iocore/dns/test_I_DNS.cc create mode 100644 iocore/dns/test_P_DNS.cc create mode 100644 iocore/eventsystem/EventSystem.cc create mode 100644 iocore/eventsystem/IOBuffer.cc create mode 100644 iocore/eventsystem/I_Action.h create mode 100644 iocore/eventsystem/I_Continuation.h create mode 100644 iocore/eventsystem/I_EThread.h create mode 100644 iocore/eventsystem/I_Event.h create mode 100644 iocore/eventsystem/I_EventProcessor.h create mode 100644 iocore/eventsystem/I_EventSystem.h create mode 100644 iocore/eventsystem/I_IOBuffer.h create mode 100644 iocore/eventsystem/I_Lock.h create mode 100644 iocore/eventsystem/I_PriorityEventQueue.h create mode 100644 iocore/eventsystem/I_Processor.h create mode 100644 iocore/eventsystem/I_ProtectedQueue.h create mode 100644 iocore/eventsystem/I_ProxyAllocator.h create mode 100644 iocore/eventsystem/I_SocketManager.h create mode 100644 iocore/eventsystem/I_Tasks.h create mode 100644 iocore/eventsystem/I_Thread.h create mode 100644 iocore/eventsystem/I_VConnection.h create mode 100644 iocore/eventsystem/I_VIO.h create mode 100644 iocore/eventsystem/Inline.cc create mode 100644 iocore/eventsystem/Lock.cc create mode 100644 iocore/eventsystem/Makefile.am create mode 100644 iocore/eventsystem/Makefile.in create mode 100644 iocore/eventsystem/PQ-List.cc create mode 100644 iocore/eventsystem/P_EventSystem.h create mode 100644 iocore/eventsystem/P_Freer.h create mode 100644 iocore/eventsystem/P_IOBuffer.h create mode 100644 
iocore/eventsystem/P_ProtectedQueue.h create mode 100644 iocore/eventsystem/P_Thread.h create mode 100644 iocore/eventsystem/P_UnixEThread.h create mode 100644 iocore/eventsystem/P_UnixEvent.h create mode 100644 iocore/eventsystem/P_UnixEventProcessor.h create mode 100644 iocore/eventsystem/P_UnixSocketManager.h create mode 100644 iocore/eventsystem/P_VConnection.h create mode 100644 iocore/eventsystem/P_VIO.h create mode 100644 iocore/eventsystem/Processor.cc create mode 100644 iocore/eventsystem/ProtectedQueue.cc create mode 100644 iocore/eventsystem/ProxyAllocator.cc create mode 100644 iocore/eventsystem/SocketManager.cc create mode 100644 iocore/eventsystem/Tasks.cc create mode 100644 iocore/eventsystem/Thread.cc create mode 100644 iocore/eventsystem/UnixEThread.cc create mode 100644 iocore/eventsystem/UnixEvent.cc create mode 100644 iocore/eventsystem/UnixEventProcessor.cc create mode 100644 iocore/eventsystem/test_Buffer.cc create mode 100644 iocore/eventsystem/test_Event.cc create mode 100644 iocore/eventsystem/test_I_Buffer.cc create mode 100644 iocore/eventsystem/test_I_Event.cc create mode 100644 iocore/eventsystem/test_P_Buffer.cc create mode 100644 iocore/eventsystem/test_P_Event.cc create mode 100644 iocore/hostdb/HostDB.cc create mode 100644 iocore/hostdb/I_HostDB.h create mode 100644 iocore/hostdb/I_HostDBProcessor.h create mode 100644 iocore/hostdb/Inline.cc create mode 100644 iocore/hostdb/Makefile.am create mode 100644 iocore/hostdb/Makefile.in create mode 100644 iocore/hostdb/MultiCache.cc create mode 100644 iocore/hostdb/P_HostDB.h create mode 100644 iocore/hostdb/P_HostDBProcessor.h create mode 100644 iocore/hostdb/P_MultiCache.h create mode 100644 iocore/hostdb/include/Machine.h create mode 100644 iocore/hostdb/test_I_HostDB.cc create mode 100644 iocore/hostdb/test_P_HostDB.cc create mode 100644 iocore/net/Connection.cc create mode 100644 iocore/net/I_Net.h create mode 100644 iocore/net/I_NetProcessor.h create mode 100644 
iocore/net/I_NetVConnection.h create mode 100644 iocore/net/I_SessionAccept.h create mode 100644 iocore/net/I_Socks.h create mode 100644 iocore/net/I_UDPConnection.h create mode 100644 iocore/net/I_UDPNet.h create mode 100644 iocore/net/I_UDPPacket.h create mode 100644 iocore/net/Inline.cc create mode 100644 iocore/net/Makefile.am create mode 100644 iocore/net/Makefile.in create mode 100644 iocore/net/Net.cc create mode 100644 iocore/net/NetTest-http-server.c create mode 100644 iocore/net/NetVCTest.cc create mode 100644 iocore/net/NetVConnection.cc create mode 100644 iocore/net/P_CompletionUtil.h create mode 100644 iocore/net/P_Connection.h create mode 100644 iocore/net/P_InkBulkIO.h create mode 100644 iocore/net/P_LibBulkIO.h create mode 100644 iocore/net/P_Net.h create mode 100644 iocore/net/P_NetAccept.h create mode 100644 iocore/net/P_NetVCTest.h create mode 100644 iocore/net/P_NetVConnection.h create mode 100644 iocore/net/P_SSLCertLookup.h create mode 100644 iocore/net/P_SSLConfig.h create mode 100644 iocore/net/P_SSLNetAccept.h create mode 100644 iocore/net/P_SSLNetProcessor.h create mode 100644 iocore/net/P_SSLNetVConnection.h create mode 100644 iocore/net/P_SSLNextProtocolAccept.h create mode 100644 iocore/net/P_SSLNextProtocolSet.h create mode 100644 iocore/net/P_SSLUtils.h create mode 100644 iocore/net/P_Socks.h create mode 100644 iocore/net/P_UDPConnection.h create mode 100644 iocore/net/P_UDPIOEvent.h create mode 100644 iocore/net/P_UDPNet.h create mode 100644 iocore/net/P_UDPPacket.h create mode 100644 iocore/net/P_UnixCompletionUtil.h create mode 100644 iocore/net/P_UnixNet.h create mode 100644 iocore/net/P_UnixNetProcessor.h create mode 100644 iocore/net/P_UnixNetState.h create mode 100644 iocore/net/P_UnixNetVConnection.h create mode 100644 iocore/net/P_UnixPollDescriptor.h create mode 100644 iocore/net/P_UnixUDPConnection.h create mode 100644 iocore/net/SSLCertLookup.cc create mode 100644 iocore/net/SSLConfig.cc create mode 100644 
iocore/net/SSLNetAccept.cc create mode 100644 iocore/net/SSLNetProcessor.cc create mode 100644 iocore/net/SSLNetVConnection.cc create mode 100644 iocore/net/SSLNextProtocolAccept.cc create mode 100644 iocore/net/SSLNextProtocolSet.cc create mode 100644 iocore/net/SSLUtils.cc create mode 100644 iocore/net/SessionAccept.cc create mode 100644 iocore/net/Socks.cc create mode 100644 iocore/net/UDPIOEvent.cc create mode 100644 iocore/net/UnixConnection.cc create mode 100644 iocore/net/UnixNet.cc create mode 100644 iocore/net/UnixNetAccept.cc create mode 100644 iocore/net/UnixNetPages.cc create mode 100644 iocore/net/UnixNetProcessor.cc create mode 100644 iocore/net/UnixNetVConnection.cc create mode 100644 iocore/net/UnixUDPConnection.cc create mode 100644 iocore/net/UnixUDPNet.cc create mode 100644 iocore/net/test_I_Net.cc create mode 100644 iocore/net/test_I_UDPNet.cc create mode 100644 iocore/net/test_P_Net.cc create mode 100644 iocore/net/test_P_UDPNet.cc create mode 100644 iocore/net/test_certlookup.cc create mode 100644 iocore/utils/I_Machine.h create mode 100644 iocore/utils/I_OneWayMultiTunnel.h create mode 100644 iocore/utils/I_OneWayTunnel.h create mode 100644 iocore/utils/Machine.cc create mode 100644 iocore/utils/Makefile.am create mode 100644 iocore/utils/Makefile.in create mode 100644 iocore/utils/OneWayMultiTunnel.cc create mode 100644 iocore/utils/OneWayTunnel.cc create mode 100644 iocore/utils/diags.i create mode 100644 lib/Makefile.am create mode 100644 lib/Makefile.in create mode 100644 lib/atscppapi/Makefile.am create mode 100644 lib/atscppapi/Makefile.in create mode 100644 lib/atscppapi/examples/Makefile.am create mode 100644 lib/atscppapi/examples/Makefile.in create mode 100644 lib/atscppapi/examples/async_http_fetch/AsyncHttpFetch.cc create mode 100644 lib/atscppapi/examples/async_http_fetch/Makefile.am create mode 100644 lib/atscppapi/examples/async_http_fetch/Makefile.in create mode 100644 lib/atscppapi/examples/async_timer/AsyncTimer.cc create 
mode 100644 lib/atscppapi/examples/async_timer/Makefile.am create mode 100644 lib/atscppapi/examples/async_timer/Makefile.in create mode 100644 lib/atscppapi/examples/boom/Makefile.am create mode 100644 lib/atscppapi/examples/boom/Makefile.in create mode 100644 lib/atscppapi/examples/boom/README.txt create mode 100644 lib/atscppapi/examples/boom/boom.cc create mode 100644 lib/atscppapi/examples/clientredirect/ClientRedirect.cc create mode 100644 lib/atscppapi/examples/clientredirect/Makefile.am create mode 100644 lib/atscppapi/examples/clientredirect/Makefile.in create mode 100644 lib/atscppapi/examples/clientrequest/ClientRequest.cc create mode 100644 lib/atscppapi/examples/clientrequest/Makefile.am create mode 100644 lib/atscppapi/examples/clientrequest/Makefile.in create mode 100644 lib/atscppapi/examples/customresponse/CustomResponse.cc create mode 100644 lib/atscppapi/examples/customresponse/Makefile.am create mode 100644 lib/atscppapi/examples/customresponse/Makefile.in create mode 100644 lib/atscppapi/examples/globalhook/GlobalHookPlugin.cc create mode 100644 lib/atscppapi/examples/globalhook/Makefile.am create mode 100644 lib/atscppapi/examples/globalhook/Makefile.in create mode 100644 lib/atscppapi/examples/gzip_transformation/GzipTransformationPlugin.cc create mode 100644 lib/atscppapi/examples/gzip_transformation/Makefile.am create mode 100644 lib/atscppapi/examples/gzip_transformation/Makefile.in create mode 100644 lib/atscppapi/examples/helloworld/HelloWorldPlugin.cc create mode 100644 lib/atscppapi/examples/helloworld/Makefile.am create mode 100644 lib/atscppapi/examples/helloworld/Makefile.in create mode 100644 lib/atscppapi/examples/intercept/Makefile.am create mode 100644 lib/atscppapi/examples/intercept/Makefile.in create mode 100644 lib/atscppapi/examples/intercept/intercept.cc create mode 100644 lib/atscppapi/examples/internal_transaction_handling/InternalTransactionHandling.cc create mode 100644 
lib/atscppapi/examples/internal_transaction_handling/Makefile.am create mode 100644 lib/atscppapi/examples/internal_transaction_handling/Makefile.in create mode 100644 lib/atscppapi/examples/logger_example/LoggerExample.cc create mode 100644 lib/atscppapi/examples/logger_example/Makefile.am create mode 100644 lib/atscppapi/examples/logger_example/Makefile.in create mode 100644 lib/atscppapi/examples/multiple_transaction_hooks/Makefile.am create mode 100644 lib/atscppapi/examples/multiple_transaction_hooks/Makefile.in create mode 100644 lib/atscppapi/examples/multiple_transaction_hooks/MultipleTransactionHookPlugins.cc create mode 100644 lib/atscppapi/examples/null_transformation_plugin/Makefile.am create mode 100644 lib/atscppapi/examples/null_transformation_plugin/Makefile.in create mode 100644 lib/atscppapi/examples/null_transformation_plugin/NullTransformationPlugin.cc create mode 100644 lib/atscppapi/examples/post_buffer/Makefile.am create mode 100644 lib/atscppapi/examples/post_buffer/Makefile.in create mode 100644 lib/atscppapi/examples/post_buffer/PostBuffer.cc create mode 100644 lib/atscppapi/examples/remap_plugin/Makefile.am create mode 100644 lib/atscppapi/examples/remap_plugin/Makefile.in create mode 100644 lib/atscppapi/examples/remap_plugin/RemapPlugin.cc create mode 100644 lib/atscppapi/examples/serverresponse/Makefile.am create mode 100644 lib/atscppapi/examples/serverresponse/Makefile.in create mode 100644 lib/atscppapi/examples/serverresponse/ServerResponse.cc create mode 100644 lib/atscppapi/examples/stat_example/Makefile.am create mode 100644 lib/atscppapi/examples/stat_example/Makefile.in create mode 100644 lib/atscppapi/examples/stat_example/StatExample.cc create mode 100644 lib/atscppapi/examples/timeout_example/Makefile.am create mode 100644 lib/atscppapi/examples/timeout_example/Makefile.in create mode 100644 lib/atscppapi/examples/timeout_example/TimeoutExamplePlugin.cc create mode 100644 lib/atscppapi/examples/transactionhook/Makefile.am 
create mode 100644 lib/atscppapi/examples/transactionhook/Makefile.in create mode 100644 lib/atscppapi/examples/transactionhook/TransactionHookPlugin.cc create mode 100644 lib/atscppapi/src/AsyncHttpFetch.cc create mode 100644 lib/atscppapi/src/AsyncTimer.cc create mode 100644 lib/atscppapi/src/CaseInsensitiveStringComparator.cc create mode 100644 lib/atscppapi/src/ClientRequest.cc create mode 100644 lib/atscppapi/src/GlobalPlugin.cc create mode 100644 lib/atscppapi/src/GzipDeflateTransformation.cc create mode 100644 lib/atscppapi/src/GzipInflateTransformation.cc create mode 100644 lib/atscppapi/src/Headers.cc create mode 100644 lib/atscppapi/src/HttpMethod.cc create mode 100644 lib/atscppapi/src/HttpVersion.cc create mode 100644 lib/atscppapi/src/InterceptPlugin.cc create mode 100644 lib/atscppapi/src/Logger.cc create mode 100644 lib/atscppapi/src/Makefile.am create mode 100644 lib/atscppapi/src/Makefile.in create mode 100644 lib/atscppapi/src/Plugin.cc create mode 100644 lib/atscppapi/src/RemapPlugin.cc create mode 100644 lib/atscppapi/src/Request.cc create mode 100644 lib/atscppapi/src/Response.cc create mode 100644 lib/atscppapi/src/Stat.cc create mode 100644 lib/atscppapi/src/Transaction.cc create mode 100644 lib/atscppapi/src/TransactionPlugin.cc create mode 100644 lib/atscppapi/src/TransformationPlugin.cc create mode 100644 lib/atscppapi/src/Url.cc create mode 100644 lib/atscppapi/src/include/atscppapi/Async.h create mode 100644 lib/atscppapi/src/include/atscppapi/AsyncHttpFetch.h create mode 100644 lib/atscppapi/src/include/atscppapi/AsyncTimer.h create mode 100644 lib/atscppapi/src/include/atscppapi/CaseInsensitiveStringComparator.h create mode 100644 lib/atscppapi/src/include/atscppapi/ClientRequest.h create mode 100644 lib/atscppapi/src/include/atscppapi/GlobalPlugin.h create mode 100644 lib/atscppapi/src/include/atscppapi/GzipDeflateTransformation.h create mode 100644 lib/atscppapi/src/include/atscppapi/GzipInflateTransformation.h create mode 100644 
lib/atscppapi/src/include/atscppapi/Headers.h create mode 100644 lib/atscppapi/src/include/atscppapi/HttpMethod.h create mode 100644 lib/atscppapi/src/include/atscppapi/HttpStatus.h create mode 100644 lib/atscppapi/src/include/atscppapi/HttpVersion.h create mode 100644 lib/atscppapi/src/include/atscppapi/InterceptPlugin.h create mode 100644 lib/atscppapi/src/include/atscppapi/Logger.h create mode 100644 lib/atscppapi/src/include/atscppapi/Mutex.h create mode 100644 lib/atscppapi/src/include/atscppapi/Plugin.h create mode 100644 lib/atscppapi/src/include/atscppapi/PluginInit.h create mode 100644 lib/atscppapi/src/include/atscppapi/RemapPlugin.h create mode 100644 lib/atscppapi/src/include/atscppapi/Request.h create mode 100644 lib/atscppapi/src/include/atscppapi/Response.h create mode 100644 lib/atscppapi/src/include/atscppapi/Stat.h create mode 100644 lib/atscppapi/src/include/atscppapi/Transaction.h create mode 100644 lib/atscppapi/src/include/atscppapi/TransactionPlugin.h create mode 100644 lib/atscppapi/src/include/atscppapi/TransformationPlugin.h create mode 100644 lib/atscppapi/src/include/atscppapi/Url.h create mode 100644 lib/atscppapi/src/include/atscppapi/noncopyable.h create mode 100644 lib/atscppapi/src/include/atscppapi/shared_ptr.h create mode 100644 lib/atscppapi/src/include/atscppapi/utils.h create mode 100644 lib/atscppapi/src/include/logging_internal.h create mode 100644 lib/atscppapi/src/include/utils_internal.h create mode 100644 lib/atscppapi/src/utils.cc create mode 100644 lib/atscppapi/src/utils_internal.cc create mode 100644 lib/luajit/.gitignore create mode 100644 lib/luajit/COPYRIGHT create mode 100644 lib/luajit/Makefile create mode 100644 lib/luajit/README create mode 100644 lib/luajit/doc/bluequad-print.css create mode 100644 lib/luajit/doc/bluequad.css create mode 100644 lib/luajit/doc/changes.html create mode 100644 lib/luajit/doc/contact.html create mode 100644 lib/luajit/doc/ext_c_api.html create mode 100644 
lib/luajit/doc/ext_ffi.html create mode 100644 lib/luajit/doc/ext_ffi_api.html create mode 100644 lib/luajit/doc/ext_ffi_semantics.html create mode 100644 lib/luajit/doc/ext_ffi_tutorial.html create mode 100644 lib/luajit/doc/ext_jit.html create mode 100644 lib/luajit/doc/extensions.html create mode 100644 lib/luajit/doc/faq.html create mode 100644 lib/luajit/doc/img/contact.png create mode 100644 lib/luajit/doc/install.html create mode 100644 lib/luajit/doc/luajit.html create mode 100644 lib/luajit/doc/running.html create mode 100644 lib/luajit/doc/status.html create mode 100644 lib/luajit/dynasm/dasm_arm.h create mode 100644 lib/luajit/dynasm/dasm_arm.lua create mode 100644 lib/luajit/dynasm/dasm_mips.h create mode 100644 lib/luajit/dynasm/dasm_mips.lua create mode 100644 lib/luajit/dynasm/dasm_ppc.h create mode 100644 lib/luajit/dynasm/dasm_ppc.lua create mode 100644 lib/luajit/dynasm/dasm_proto.h create mode 100644 lib/luajit/dynasm/dasm_x64.lua create mode 100644 lib/luajit/dynasm/dasm_x86.h create mode 100644 lib/luajit/dynasm/dasm_x86.lua create mode 100644 lib/luajit/dynasm/dynasm.lua create mode 100644 lib/luajit/etc/luajit.1 create mode 100644 lib/luajit/etc/luajit.pc create mode 100644 lib/luajit/src/.gitignore create mode 100644 lib/luajit/src/Makefile create mode 100644 lib/luajit/src/Makefile.dep create mode 100644 lib/luajit/src/host/.gitignore create mode 100644 lib/luajit/src/host/README create mode 100644 lib/luajit/src/host/buildvm.c create mode 100644 lib/luajit/src/host/buildvm.h create mode 100644 lib/luajit/src/host/buildvm_asm.c create mode 100644 lib/luajit/src/host/buildvm_fold.c create mode 100644 lib/luajit/src/host/buildvm_lib.c create mode 100644 lib/luajit/src/host/buildvm_peobj.c create mode 100644 lib/luajit/src/host/genminilua.lua create mode 100644 lib/luajit/src/host/minilua.c create mode 100644 lib/luajit/src/jit/.gitignore create mode 100644 lib/luajit/src/jit/bc.lua create mode 100644 lib/luajit/src/jit/bcsave.lua create mode 
100644 lib/luajit/src/jit/dis_arm.lua create mode 100644 lib/luajit/src/jit/dis_mips.lua create mode 100644 lib/luajit/src/jit/dis_mipsel.lua create mode 100644 lib/luajit/src/jit/dis_ppc.lua create mode 100644 lib/luajit/src/jit/dis_x64.lua create mode 100644 lib/luajit/src/jit/dis_x86.lua create mode 100644 lib/luajit/src/jit/dump.lua create mode 100644 lib/luajit/src/jit/v.lua create mode 100644 lib/luajit/src/lauxlib.h create mode 100644 lib/luajit/src/lib_aux.c create mode 100644 lib/luajit/src/lib_base.c create mode 100644 lib/luajit/src/lib_bit.c create mode 100644 lib/luajit/src/lib_debug.c create mode 100644 lib/luajit/src/lib_ffi.c create mode 100644 lib/luajit/src/lib_init.c create mode 100644 lib/luajit/src/lib_io.c create mode 100644 lib/luajit/src/lib_jit.c create mode 100644 lib/luajit/src/lib_math.c create mode 100644 lib/luajit/src/lib_os.c create mode 100644 lib/luajit/src/lib_package.c create mode 100644 lib/luajit/src/lib_string.c create mode 100644 lib/luajit/src/lib_table.c create mode 100644 lib/luajit/src/lj.supp create mode 100644 lib/luajit/src/lj_alloc.c create mode 100644 lib/luajit/src/lj_alloc.h create mode 100644 lib/luajit/src/lj_api.c create mode 100644 lib/luajit/src/lj_arch.h create mode 100644 lib/luajit/src/lj_asm.c create mode 100644 lib/luajit/src/lj_asm.h create mode 100644 lib/luajit/src/lj_asm_arm.h create mode 100644 lib/luajit/src/lj_asm_mips.h create mode 100644 lib/luajit/src/lj_asm_ppc.h create mode 100644 lib/luajit/src/lj_asm_x86.h create mode 100644 lib/luajit/src/lj_bc.c create mode 100644 lib/luajit/src/lj_bc.h create mode 100644 lib/luajit/src/lj_bcdump.h create mode 100644 lib/luajit/src/lj_bcread.c create mode 100644 lib/luajit/src/lj_bcwrite.c create mode 100644 lib/luajit/src/lj_carith.c create mode 100644 lib/luajit/src/lj_carith.h create mode 100644 lib/luajit/src/lj_ccall.c create mode 100644 lib/luajit/src/lj_ccall.h create mode 100644 lib/luajit/src/lj_ccallback.c create mode 100644 
lib/luajit/src/lj_ccallback.h create mode 100644 lib/luajit/src/lj_cconv.c create mode 100644 lib/luajit/src/lj_cconv.h create mode 100644 lib/luajit/src/lj_cdata.c create mode 100644 lib/luajit/src/lj_cdata.h create mode 100644 lib/luajit/src/lj_char.c create mode 100644 lib/luajit/src/lj_char.h create mode 100644 lib/luajit/src/lj_clib.c create mode 100644 lib/luajit/src/lj_clib.h create mode 100644 lib/luajit/src/lj_cparse.c create mode 100644 lib/luajit/src/lj_cparse.h create mode 100644 lib/luajit/src/lj_crecord.c create mode 100644 lib/luajit/src/lj_crecord.h create mode 100644 lib/luajit/src/lj_ctype.c create mode 100644 lib/luajit/src/lj_ctype.h create mode 100644 lib/luajit/src/lj_debug.c create mode 100644 lib/luajit/src/lj_debug.h create mode 100644 lib/luajit/src/lj_def.h create mode 100644 lib/luajit/src/lj_dispatch.c create mode 100644 lib/luajit/src/lj_dispatch.h create mode 100644 lib/luajit/src/lj_emit_arm.h create mode 100644 lib/luajit/src/lj_emit_mips.h create mode 100644 lib/luajit/src/lj_emit_ppc.h create mode 100644 lib/luajit/src/lj_emit_x86.h create mode 100644 lib/luajit/src/lj_err.c create mode 100644 lib/luajit/src/lj_err.h create mode 100644 lib/luajit/src/lj_errmsg.h create mode 100644 lib/luajit/src/lj_ff.h create mode 100644 lib/luajit/src/lj_ffrecord.c create mode 100644 lib/luajit/src/lj_ffrecord.h create mode 100644 lib/luajit/src/lj_frame.h create mode 100644 lib/luajit/src/lj_func.c create mode 100644 lib/luajit/src/lj_func.h create mode 100644 lib/luajit/src/lj_gc.c create mode 100644 lib/luajit/src/lj_gc.h create mode 100644 lib/luajit/src/lj_gdbjit.c create mode 100644 lib/luajit/src/lj_gdbjit.h create mode 100644 lib/luajit/src/lj_ir.c create mode 100644 lib/luajit/src/lj_ir.h create mode 100644 lib/luajit/src/lj_ircall.h create mode 100644 lib/luajit/src/lj_iropt.h create mode 100644 lib/luajit/src/lj_jit.h create mode 100644 lib/luajit/src/lj_lex.c create mode 100644 lib/luajit/src/lj_lex.h create mode 100644 
lib/luajit/src/lj_lib.c create mode 100644 lib/luajit/src/lj_lib.h create mode 100644 lib/luajit/src/lj_load.c create mode 100644 lib/luajit/src/lj_mcode.c create mode 100644 lib/luajit/src/lj_mcode.h create mode 100644 lib/luajit/src/lj_meta.c create mode 100644 lib/luajit/src/lj_meta.h create mode 100644 lib/luajit/src/lj_obj.c create mode 100644 lib/luajit/src/lj_obj.h create mode 100644 lib/luajit/src/lj_opt_dce.c create mode 100644 lib/luajit/src/lj_opt_fold.c create mode 100644 lib/luajit/src/lj_opt_loop.c create mode 100644 lib/luajit/src/lj_opt_mem.c create mode 100644 lib/luajit/src/lj_opt_narrow.c create mode 100644 lib/luajit/src/lj_opt_sink.c create mode 100644 lib/luajit/src/lj_opt_split.c create mode 100644 lib/luajit/src/lj_parse.c create mode 100644 lib/luajit/src/lj_parse.h create mode 100644 lib/luajit/src/lj_record.c create mode 100644 lib/luajit/src/lj_record.h create mode 100644 lib/luajit/src/lj_snap.c create mode 100644 lib/luajit/src/lj_snap.h create mode 100644 lib/luajit/src/lj_state.c create mode 100644 lib/luajit/src/lj_state.h create mode 100644 lib/luajit/src/lj_str.c create mode 100644 lib/luajit/src/lj_str.h create mode 100644 lib/luajit/src/lj_strscan.c create mode 100644 lib/luajit/src/lj_strscan.h create mode 100644 lib/luajit/src/lj_tab.c create mode 100644 lib/luajit/src/lj_tab.h create mode 100644 lib/luajit/src/lj_target.h create mode 100644 lib/luajit/src/lj_target_arm.h create mode 100644 lib/luajit/src/lj_target_mips.h create mode 100644 lib/luajit/src/lj_target_ppc.h create mode 100644 lib/luajit/src/lj_target_x86.h create mode 100644 lib/luajit/src/lj_trace.c create mode 100644 lib/luajit/src/lj_trace.h create mode 100644 lib/luajit/src/lj_traceerr.h create mode 100644 lib/luajit/src/lj_udata.c create mode 100644 lib/luajit/src/lj_udata.h create mode 100644 lib/luajit/src/lj_vm.h create mode 100644 lib/luajit/src/lj_vmevent.c create mode 100644 lib/luajit/src/lj_vmevent.h create mode 100644 lib/luajit/src/lj_vmmath.c 
create mode 100644 lib/luajit/src/ljamalg.c create mode 100644 lib/luajit/src/lua.h create mode 100644 lib/luajit/src/lua.hpp create mode 100644 lib/luajit/src/luaconf.h create mode 100644 lib/luajit/src/luajit.c create mode 100644 lib/luajit/src/luajit.h create mode 100644 lib/luajit/src/lualib.h create mode 100644 lib/luajit/src/msvcbuild.bat create mode 100644 lib/luajit/src/ps4build.bat create mode 100644 lib/luajit/src/vm_arm.dasc create mode 100644 lib/luajit/src/vm_mips.dasc create mode 100644 lib/luajit/src/vm_ppc.dasc create mode 100644 lib/luajit/src/vm_ppcspe.dasc create mode 100644 lib/luajit/src/vm_x86.dasc create mode 100644 lib/luajit/src/xedkbuild.bat create mode 100644 lib/perl/MANIFEST create mode 100644 lib/perl/Makefile.PL create mode 100644 lib/perl/Makefile.am create mode 100644 lib/perl/Makefile.in create mode 100644 lib/perl/README create mode 100755 lib/perl/examples/forw_proxy_conf.pl create mode 100644 lib/perl/lib/Apache/TS.pm.in create mode 100644 lib/perl/lib/Apache/TS/AdminClient.pm create mode 100644 lib/perl/lib/Apache/TS/Config.pm create mode 100644 lib/perl/lib/Apache/TS/Config/Records.pm create mode 100644 lib/perl/t/Apache-TS-AdminClient.t create mode 100644 lib/records/I_RecAlarms.h create mode 100644 lib/records/I_RecCore.h create mode 100644 lib/records/I_RecDefs.h create mode 100644 lib/records/I_RecEvents.h create mode 100644 lib/records/I_RecHttp.h create mode 100644 lib/records/I_RecLocal.h create mode 100644 lib/records/I_RecMutex.h create mode 100644 lib/records/I_RecProcess.h create mode 100644 lib/records/I_RecSignals.h create mode 100644 lib/records/Makefile.am create mode 100644 lib/records/Makefile.in create mode 100644 lib/records/P_RecCore.cc create mode 100644 lib/records/P_RecCore.h create mode 100644 lib/records/P_RecDefs.h create mode 100644 lib/records/P_RecFile.h create mode 100644 lib/records/P_RecLocal.h create mode 100644 lib/records/P_RecMessage.h create mode 100644 lib/records/P_RecProcess.h create 
mode 100644 lib/records/P_RecTree.h create mode 100644 lib/records/P_RecUtils.h create mode 100644 lib/records/RecConfigParse.cc create mode 100644 lib/records/RecCore.cc create mode 100644 lib/records/RecDebug.cc create mode 100644 lib/records/RecFile.cc create mode 100644 lib/records/RecHttp.cc create mode 100644 lib/records/RecLocal.cc create mode 100644 lib/records/RecMessage.cc create mode 100644 lib/records/RecMutex.cc create mode 100644 lib/records/RecProcess.cc create mode 100644 lib/records/RecTree.cc create mode 100644 lib/records/RecUtils.cc create mode 100644 lib/records/test_I_RecLocal.cc create mode 100644 lib/records/test_I_RecProcess.cc create mode 100644 lib/records/test_P_RecProcess.cc create mode 100644 lib/records/test_RecProcess.i create mode 100644 lib/records/test_RecTree.cc create mode 100644 lib/records/test_RecordsConfig.cc create mode 100644 lib/records/test_RecordsConfig.h create mode 100644 lib/records/test_records.config create mode 100644 lib/ts/Allocator.h create mode 100644 lib/ts/Arena.cc create mode 100644 lib/ts/Arena.h create mode 100644 lib/ts/Bitops.cc create mode 100644 lib/ts/Bitops.h create mode 100644 lib/ts/Compatability.h create mode 100644 lib/ts/CompileParseRules.cc create mode 100644 lib/ts/Diags.cc create mode 100644 lib/ts/Diags.h create mode 100644 lib/ts/DynArray.h create mode 100644 lib/ts/EventNotify.cc create mode 100644 lib/ts/EventNotify.h create mode 100644 lib/ts/HostLookup.cc create mode 100644 lib/ts/HostLookup.h create mode 100644 lib/ts/INK_MD5.h create mode 100644 lib/ts/I_Layout.h create mode 100644 lib/ts/I_Version.h create mode 100644 lib/ts/InkErrno.h create mode 100644 lib/ts/InkPool.h create mode 100644 lib/ts/IntrusiveDList.h create mode 100644 lib/ts/IntrusivePtrTest.cc create mode 100644 lib/ts/IpMap.cc create mode 100644 lib/ts/IpMap.h create mode 100644 lib/ts/IpMapConf.cc create mode 100644 lib/ts/IpMapConf.h create mode 100644 lib/ts/IpMapTest.cc create mode 100644 lib/ts/Layout.cc create 
mode 100644 lib/ts/List.h create mode 100644 lib/ts/Makefile.am create mode 100644 lib/ts/Makefile.in create mode 100644 lib/ts/Map.h create mode 100644 lib/ts/MatcherUtils.cc create mode 100644 lib/ts/MatcherUtils.h create mode 100644 lib/ts/MimeTable.cc create mode 100644 lib/ts/MimeTable.h create mode 100644 lib/ts/ParseRules.cc create mode 100644 lib/ts/ParseRules.h create mode 100644 lib/ts/Ptr.h create mode 100644 lib/ts/RawHashTable.cc create mode 100644 lib/ts/RawHashTable.h create mode 100644 lib/ts/Regex.cc create mode 100644 lib/ts/Regex.h create mode 100644 lib/ts/Regression.cc create mode 100644 lib/ts/Regression.h create mode 100644 lib/ts/SimpleTokenizer.h create mode 100644 lib/ts/TestBox.h create mode 100644 lib/ts/TestHttpHeader.cc create mode 100644 lib/ts/TextBuffer.cc create mode 100644 lib/ts/TextBuffer.h create mode 100644 lib/ts/Tokenizer.cc create mode 100644 lib/ts/Tokenizer.h create mode 100644 lib/ts/Trie.h create mode 100644 lib/ts/TsBuffer.h create mode 100644 lib/ts/Vec.cc create mode 100644 lib/ts/Vec.h create mode 100644 lib/ts/Version.cc create mode 100644 lib/ts/apidefs.h.in create mode 100644 lib/ts/defalloc.h create mode 100644 lib/ts/fastlz.c create mode 100644 lib/ts/fastlz.h create mode 100644 lib/ts/ink_aiocb.h create mode 100644 lib/ts/ink_align.h create mode 100644 lib/ts/ink_apidefs.h create mode 100644 lib/ts/ink_args.cc create mode 100644 lib/ts/ink_args.h create mode 100644 lib/ts/ink_assert.cc create mode 100644 lib/ts/ink_assert.h create mode 100644 lib/ts/ink_atomic.h create mode 100644 lib/ts/ink_auth_api.cc create mode 100644 lib/ts/ink_auth_api.h create mode 100644 lib/ts/ink_autoconf.h.in create mode 100644 lib/ts/ink_base64.cc create mode 100644 lib/ts/ink_base64.h create mode 100644 lib/ts/ink_cap.cc create mode 100644 lib/ts/ink_cap.h create mode 100644 lib/ts/ink_code.cc create mode 100644 lib/ts/ink_code.h create mode 100644 lib/ts/ink_config.h.in create mode 100644 lib/ts/ink_defs.cc create mode 100644 
lib/ts/ink_defs.h create mode 100644 lib/ts/ink_error.cc create mode 100644 lib/ts/ink_error.h create mode 100644 lib/ts/ink_exception.h create mode 100644 lib/ts/ink_file.cc create mode 100644 lib/ts/ink_file.h create mode 100644 lib/ts/ink_hash_table.cc create mode 100644 lib/ts/ink_hash_table.h create mode 100644 lib/ts/ink_hrtime.cc create mode 100644 lib/ts/ink_hrtime.h create mode 100644 lib/ts/ink_inet.cc create mode 100644 lib/ts/ink_inet.h create mode 100644 lib/ts/ink_inout.h create mode 100644 lib/ts/ink_llqueue.h create mode 100644 lib/ts/ink_lockfile.h create mode 100644 lib/ts/ink_memory.cc create mode 100644 lib/ts/ink_memory.h create mode 100644 lib/ts/ink_mutex.cc create mode 100644 lib/ts/ink_mutex.h create mode 100644 lib/ts/ink_platform.h create mode 100644 lib/ts/ink_queue.cc create mode 100644 lib/ts/ink_queue.h create mode 100644 lib/ts/ink_queue_ext.cc create mode 100644 lib/ts/ink_queue_ext.h create mode 100644 lib/ts/ink_queue_utils.cc create mode 100644 lib/ts/ink_rand.cc create mode 100644 lib/ts/ink_rand.h create mode 100644 lib/ts/ink_res_init.cc create mode 100644 lib/ts/ink_res_mkquery.cc create mode 100644 lib/ts/ink_resolver.h create mode 100644 lib/ts/ink_resource.cc create mode 100644 lib/ts/ink_resource.h create mode 100644 lib/ts/ink_rwlock.cc create mode 100644 lib/ts/ink_rwlock.h create mode 100644 lib/ts/ink_sock.cc create mode 100644 lib/ts/ink_sock.h create mode 100644 lib/ts/ink_sprintf.cc create mode 100644 lib/ts/ink_sprintf.h create mode 100644 lib/ts/ink_stack_trace.cc create mode 100644 lib/ts/ink_stack_trace.h create mode 100644 lib/ts/ink_string++.cc create mode 100644 lib/ts/ink_string++.h create mode 100644 lib/ts/ink_string.cc create mode 100644 lib/ts/ink_string.h create mode 100644 lib/ts/ink_sys_control.cc create mode 100644 lib/ts/ink_sys_control.h create mode 100644 lib/ts/ink_syslog.cc create mode 100644 lib/ts/ink_syslog.h create mode 100644 lib/ts/ink_thread.cc create mode 100644 lib/ts/ink_thread.h 
create mode 100644 lib/ts/ink_time.cc create mode 100644 lib/ts/ink_time.h create mode 100644 lib/ts/libts.h create mode 100644 lib/ts/llqueue.cc create mode 100644 lib/ts/load_http_hdr.cc create mode 100644 lib/ts/lockfile.cc create mode 100644 lib/ts/mkdfa.c create mode 100644 lib/ts/test_List.cc create mode 100644 lib/ts/test_Map.cc create mode 100644 lib/ts/test_Vec.cc create mode 100644 lib/ts/test_arena.cc create mode 100644 lib/ts/test_atomic.cc create mode 100644 lib/ts/test_freelist.cc create mode 100644 lib/ts/test_memchr.cc create mode 100644 lib/ts/test_strings.cc create mode 100644 lib/tsconfig/BisonHeaderToC++.sed create mode 100644 lib/tsconfig/Errata.cc create mode 100644 lib/tsconfig/Errata.h create mode 100644 lib/tsconfig/IntrusivePtr.h create mode 100644 lib/tsconfig/Makefile.am create mode 100644 lib/tsconfig/Makefile.in create mode 100644 lib/tsconfig/NumericType.h create mode 100644 lib/tsconfig/TsBuilder.cc create mode 100644 lib/tsconfig/TsBuilder.h create mode 100644 lib/tsconfig/TsConfigGrammar.y create mode 100644 lib/tsconfig/TsConfigLexer.h create mode 100644 lib/tsconfig/TsConfigParseEvents.h create mode 100644 lib/tsconfig/TsConfigSyntax.l create mode 100644 lib/tsconfig/TsConfigTypes.h create mode 100644 lib/tsconfig/TsErrataUtil.cc create mode 100644 lib/tsconfig/TsErrataUtil.h create mode 100644 lib/tsconfig/TsValue.cc create mode 100644 lib/tsconfig/TsValue.h create mode 100644 lib/tsconfig/test-1.tsconfig create mode 100644 lib/tsconfig/test-tsconfig.cc create mode 100644 lib/wccp/Makefile.am create mode 100644 lib/wccp/Makefile.in create mode 100644 lib/wccp/Wccp.h create mode 100644 lib/wccp/WccpConfig.cc create mode 100644 lib/wccp/WccpEndPoint.cc create mode 100644 lib/wccp/WccpLocal.h create mode 100644 lib/wccp/WccpMeta.h create mode 100644 lib/wccp/WccpMsg.cc create mode 100644 lib/wccp/WccpStatic.cc create mode 100644 lib/wccp/WccpUtil.h create mode 100644 lib/wccp/wccp-test-cache.cc create mode 100644 
lib/wccp/wccp-test-router.cc create mode 100644 mgmt/AddConfigFilesHere.cc create mode 100644 mgmt/Alarms.cc create mode 100644 mgmt/Alarms.h create mode 100644 mgmt/BaseManager.cc create mode 100644 mgmt/BaseManager.h create mode 100644 mgmt/FileManager.cc create mode 100644 mgmt/FileManager.h create mode 100644 mgmt/LocalManager.cc create mode 100644 mgmt/LocalManager.h create mode 100644 mgmt/Main.cc create mode 100644 mgmt/Main.h create mode 100644 mgmt/Makefile.am create mode 100644 mgmt/Makefile.in create mode 100644 mgmt/MgmtDefs.h create mode 100644 mgmt/MultiFile.cc create mode 100644 mgmt/MultiFile.h create mode 100644 mgmt/ProcessManager.cc create mode 100644 mgmt/ProcessManager.h create mode 100644 mgmt/ProxyConfig.cc create mode 100644 mgmt/ProxyConfig.h create mode 100644 mgmt/RecordsConfig.cc create mode 100644 mgmt/RecordsConfig.h create mode 100644 mgmt/Rollback.cc create mode 100644 mgmt/Rollback.h create mode 100644 mgmt/api/APITestCliRemote.cc create mode 100644 mgmt/api/CfgContextDefs.h create mode 100644 mgmt/api/CfgContextImpl.cc create mode 100644 mgmt/api/CfgContextImpl.h create mode 100644 mgmt/api/CfgContextManager.cc create mode 100644 mgmt/api/CfgContextManager.h create mode 100644 mgmt/api/CfgContextUtils.cc create mode 100644 mgmt/api/CfgContextUtils.h create mode 100644 mgmt/api/CoreAPI.cc create mode 100644 mgmt/api/CoreAPI.h create mode 100644 mgmt/api/CoreAPIRemote.cc create mode 100644 mgmt/api/CoreAPIShared.cc create mode 100644 mgmt/api/CoreAPIShared.h create mode 100644 mgmt/api/EventCallback.cc create mode 100644 mgmt/api/EventCallback.h create mode 100644 mgmt/api/EventControlMain.cc create mode 100644 mgmt/api/EventControlMain.h create mode 100644 mgmt/api/EventRegistration.cc create mode 100644 mgmt/api/EventRegistration.h create mode 100644 mgmt/api/GenericParser.cc create mode 100644 mgmt/api/GenericParser.h create mode 100644 mgmt/api/INKMgmtAPI.cc create mode 100644 mgmt/api/INKMgmtAPIStub.cc create mode 100644 
mgmt/api/Makefile.am create mode 100644 mgmt/api/Makefile.in create mode 100644 mgmt/api/NetworkUtilsDefs.h create mode 100644 mgmt/api/NetworkUtilsLocal.cc create mode 100644 mgmt/api/NetworkUtilsLocal.h create mode 100644 mgmt/api/NetworkUtilsRemote.cc create mode 100644 mgmt/api/NetworkUtilsRemote.h create mode 100644 mgmt/api/TSControlMain.cc create mode 100644 mgmt/api/TSControlMain.h create mode 100644 mgmt/api/include/Makefile.am create mode 100644 mgmt/api/include/Makefile.in create mode 100644 mgmt/api/include/mgmtapi.h create mode 100644 mgmt/cluster/ClusterCom.cc create mode 100644 mgmt/cluster/ClusterCom.h create mode 100644 mgmt/cluster/Makefile.am create mode 100644 mgmt/cluster/Makefile.in create mode 100644 mgmt/cluster/VMap.cc create mode 100644 mgmt/cluster/VMap.h create mode 100644 mgmt/stats/Makefile.am create mode 100644 mgmt/stats/Makefile.in create mode 100644 mgmt/stats/StatProcessor.cc create mode 100644 mgmt/stats/StatProcessor.h create mode 100644 mgmt/stats/StatType.cc create mode 100644 mgmt/stats/StatType.h create mode 100644 mgmt/stats/StatXML.cc create mode 100644 mgmt/stats/StatXML.h create mode 100644 mgmt/stats/spec create mode 100644 mgmt/utils/ExpandingArray.cc create mode 100644 mgmt/utils/ExpandingArray.h create mode 100644 mgmt/utils/Makefile.am create mode 100644 mgmt/utils/Makefile.in create mode 100644 mgmt/utils/MgmtHashTable.h create mode 100644 mgmt/utils/MgmtSocket.h create mode 100644 mgmt/utils/MgmtUtils.cc create mode 100644 mgmt/utils/MgmtUtils.h create mode 100644 mgmt/utils/WebMgmtUtils.cc create mode 100644 mgmt/utils/WebMgmtUtils.h create mode 100644 mgmt/web2/Makefile.am create mode 100644 mgmt/web2/Makefile.in create mode 100644 mgmt/web2/WebCompatibility.cc create mode 100644 mgmt/web2/WebCompatibility.h create mode 100644 mgmt/web2/WebGlobals.h create mode 100644 mgmt/web2/WebHttp.cc create mode 100644 mgmt/web2/WebHttp.h create mode 100644 mgmt/web2/WebHttpContext.cc create mode 100644 
mgmt/web2/WebHttpContext.h create mode 100644 mgmt/web2/WebHttpMessage.cc create mode 100644 mgmt/web2/WebHttpMessage.h create mode 100644 mgmt/web2/WebHttpSession.cc create mode 100644 mgmt/web2/WebHttpSession.h create mode 100644 mgmt/web2/WebIntrMain.cc create mode 100644 mgmt/web2/WebIntrMain.h create mode 100644 mgmt/web2/WebOverview.cc create mode 100644 mgmt/web2/WebOverview.h create mode 100644 mgmt/web2/WebUtils.cc create mode 100644 mgmt/web2/WebUtils.h create mode 100644 plugins/Makefile.am create mode 100644 plugins/Makefile.in create mode 100644 plugins/cacheurl/Makefile.am create mode 100644 plugins/cacheurl/Makefile.in create mode 100644 plugins/cacheurl/cacheurl.cc create mode 100644 plugins/cacheurl/cacheurl.config.example create mode 100644 plugins/conf_remap/Makefile.am create mode 100644 plugins/conf_remap/Makefile.in create mode 100644 plugins/conf_remap/conf_remap.cc create mode 100644 plugins/experimental/Makefile.am create mode 100644 plugins/experimental/Makefile.in create mode 100644 plugins/experimental/authproxy/Makefile.am create mode 100644 plugins/experimental/authproxy/Makefile.in create mode 100644 plugins/experimental/authproxy/authproxy.cc create mode 100644 plugins/experimental/authproxy/utils.cc create mode 100644 plugins/experimental/authproxy/utils.h create mode 100644 plugins/experimental/background_fetch/Makefile.am create mode 100644 plugins/experimental/background_fetch/Makefile.in create mode 100644 plugins/experimental/background_fetch/background_fetch.cc create mode 100644 plugins/experimental/balancer/Makefile.am create mode 100644 plugins/experimental/balancer/Makefile.in create mode 100644 plugins/experimental/balancer/balancer.cc create mode 100644 plugins/experimental/balancer/balancer.h create mode 100644 plugins/experimental/balancer/hash.cc create mode 100644 plugins/experimental/balancer/roundrobin.cc create mode 100644 plugins/experimental/buffer_upload/Makefile.am create mode 100644 
plugins/experimental/buffer_upload/Makefile.in create mode 100644 plugins/experimental/buffer_upload/README create mode 100644 plugins/experimental/buffer_upload/buffer_upload.cc create mode 100644 plugins/experimental/channel_stats/Makefile.am create mode 100644 plugins/experimental/channel_stats/Makefile.in create mode 100644 plugins/experimental/channel_stats/Makefile.tsxs create mode 100644 plugins/experimental/channel_stats/README create mode 100644 plugins/experimental/channel_stats/channel_stats.cc create mode 100644 plugins/experimental/channel_stats/debug_macros.h create mode 100644 plugins/experimental/collapsed_connection/LICENSE create mode 100644 plugins/experimental/collapsed_connection/Makefile.am create mode 100644 plugins/experimental/collapsed_connection/Makefile.in create mode 100644 plugins/experimental/collapsed_connection/MurmurHash3.cc create mode 100644 plugins/experimental/collapsed_connection/MurmurHash3.h create mode 100644 plugins/experimental/collapsed_connection/NOTICE create mode 100644 plugins/experimental/collapsed_connection/P_collapsed_connection.h create mode 100644 plugins/experimental/collapsed_connection/README create mode 100644 plugins/experimental/collapsed_connection/collapsed_connection.cc create mode 100644 plugins/experimental/collapsed_connection/default.config create mode 100644 plugins/experimental/collapsed_connection/state.dot create mode 100644 plugins/experimental/custom_redirect/Makefile.am create mode 100644 plugins/experimental/custom_redirect/Makefile.in create mode 100644 plugins/experimental/custom_redirect/README create mode 100644 plugins/experimental/custom_redirect/custom_redirect.cc create mode 100644 plugins/experimental/escalate/Makefile.am create mode 100644 plugins/experimental/escalate/Makefile.in create mode 100644 plugins/experimental/escalate/escalate.cc create mode 100644 plugins/experimental/esi/Makefile.am create mode 100644 plugins/experimental/esi/Makefile.in create mode 100644 
plugins/experimental/esi/README create mode 100644 plugins/experimental/esi/README.combo create mode 100644 plugins/experimental/esi/combo_handler.cc create mode 100644 plugins/experimental/esi/esi.cc create mode 100644 plugins/experimental/esi/fetcher/FetchedDataProcessor.h create mode 100644 plugins/experimental/esi/fetcher/HttpDataFetcher.h create mode 100644 plugins/experimental/esi/fetcher/HttpDataFetcherImpl.cc create mode 100644 plugins/experimental/esi/fetcher/HttpDataFetcherImpl.h create mode 100644 plugins/experimental/esi/handlers.cfg create mode 100644 plugins/experimental/esi/lib/Attribute.h create mode 100644 plugins/experimental/esi/lib/ComponentBase.h create mode 100644 plugins/experimental/esi/lib/DocNode.cc create mode 100644 plugins/experimental/esi/lib/DocNode.h create mode 100644 plugins/experimental/esi/lib/EsiGzip.cc create mode 100644 plugins/experimental/esi/lib/EsiGzip.h create mode 100644 plugins/experimental/esi/lib/EsiParser.cc create mode 100644 plugins/experimental/esi/lib/EsiParser.h create mode 100644 plugins/experimental/esi/lib/EsiProcessor.cc create mode 100644 plugins/experimental/esi/lib/EsiProcessor.h create mode 100644 plugins/experimental/esi/lib/Expression.cc create mode 100644 plugins/experimental/esi/lib/Expression.h create mode 100644 plugins/experimental/esi/lib/FailureInfo.cc create mode 100644 plugins/experimental/esi/lib/FailureInfo.h create mode 100644 plugins/experimental/esi/lib/HandlerManager.cc create mode 100644 plugins/experimental/esi/lib/HandlerManager.h create mode 100644 plugins/experimental/esi/lib/HttpHeader.h create mode 100644 plugins/experimental/esi/lib/IncludeHandlerFactory.h create mode 100644 plugins/experimental/esi/lib/SpecialIncludeHandler.h create mode 100644 plugins/experimental/esi/lib/Stats.cc create mode 100644 plugins/experimental/esi/lib/Stats.h create mode 100644 plugins/experimental/esi/lib/StringHash.h create mode 100644 plugins/experimental/esi/lib/Utils.cc create mode 100644 
plugins/experimental/esi/lib/Utils.h create mode 100644 plugins/experimental/esi/lib/Variables.cc create mode 100644 plugins/experimental/esi/lib/Variables.h create mode 100644 plugins/experimental/esi/lib/gzip.cc create mode 100644 plugins/experimental/esi/lib/gzip.h create mode 100644 plugins/experimental/esi/serverIntercept.cc create mode 100644 plugins/experimental/esi/serverIntercept.h create mode 100644 plugins/experimental/esi/test/HandlerMap.cc create mode 100644 plugins/experimental/esi/test/HandlerMap.h create mode 100644 plugins/experimental/esi/test/StubIncludeHandler.cc create mode 100644 plugins/experimental/esi/test/StubIncludeHandler.h create mode 100644 plugins/experimental/esi/test/TestHandlerManager.cc create mode 100644 plugins/experimental/esi/test/TestHttpDataFetcher.h create mode 100644 plugins/experimental/esi/test/docnode_test.cc create mode 100644 plugins/experimental/esi/test/parser_test.cc create mode 100644 plugins/experimental/esi/test/print_funcs.cc create mode 100644 plugins/experimental/esi/test/print_funcs.h create mode 100644 plugins/experimental/esi/test/processor_test.cc create mode 100644 plugins/experimental/esi/test/sampleProb.cc create mode 100644 plugins/experimental/esi/test/utils_test.cc create mode 100644 plugins/experimental/esi/test/vars_test.cc create mode 100644 plugins/experimental/geoip_acl/Makefile.am create mode 100644 plugins/experimental/geoip_acl/Makefile.in create mode 100644 plugins/experimental/geoip_acl/README create mode 100644 plugins/experimental/geoip_acl/acl.cc create mode 100644 plugins/experimental/geoip_acl/acl.h create mode 100644 plugins/experimental/geoip_acl/examples create mode 100644 plugins/experimental/geoip_acl/geoip_acl.cc create mode 100644 plugins/experimental/geoip_acl/lulu.h create mode 100644 plugins/experimental/healthchecks/Makefile.am create mode 100644 plugins/experimental/healthchecks/Makefile.in create mode 100644 plugins/experimental/healthchecks/README create mode 100644 
plugins/experimental/healthchecks/healthchecks.c create mode 100644 plugins/experimental/hipes/Makefile.am create mode 100644 plugins/experimental/hipes/Makefile.in create mode 100644 plugins/experimental/hipes/README create mode 100644 plugins/experimental/hipes/gen_escape.c create mode 100644 plugins/experimental/hipes/hipes.cc create mode 100644 plugins/experimental/memcached_remap/AUTHORS create mode 100644 plugins/experimental/memcached_remap/Changelog create mode 100644 plugins/experimental/memcached_remap/NOTICE create mode 100644 plugins/experimental/memcached_remap/README create mode 100644 plugins/experimental/memcached_remap/memcached_remap.cc create mode 100755 plugins/experimental/memcached_remap/sample.py create mode 100644 plugins/experimental/metalink/Makefile.am create mode 100644 plugins/experimental/metalink/Makefile.in create mode 100644 plugins/experimental/metalink/README create mode 100644 plugins/experimental/metalink/metalink.cc create mode 100755 plugins/experimental/metalink/test/chunkedEncoding create mode 100755 plugins/experimental/metalink/test/chunkedEncodingDisconnect create mode 100755 plugins/experimental/metalink/test/clientDisconnect create mode 100755 plugins/experimental/metalink/test/contentLength create mode 100755 plugins/experimental/metalink/test/contentLengthDisconnect create mode 100755 plugins/experimental/metalink/test/finalChunkedEncodingDisconnect create mode 100755 plugins/experimental/metalink/test/headers create mode 100755 plugins/experimental/metalink/test/http09 create mode 100755 plugins/experimental/metalink/test/location create mode 100755 plugins/experimental/metalink/test/longer create mode 100755 plugins/experimental/metalink/test/notModified create mode 100755 plugins/experimental/metalink/test/pipeliningDisconnect create mode 100755 plugins/experimental/metalink/test/shortChunkedEncodingDisconnect create mode 100755 plugins/experimental/metalink/test/shortClientDisconnect create mode 100755 
plugins/experimental/metalink/test/shortContentLengthDisconnect create mode 100755 plugins/experimental/metalink/test/zero create mode 100644 plugins/experimental/mysql_remap/AUTHORS create mode 100644 plugins/experimental/mysql_remap/README create mode 100644 plugins/experimental/mysql_remap/default.h create mode 100644 plugins/experimental/mysql_remap/lib/dictionary.c create mode 100644 plugins/experimental/mysql_remap/lib/dictionary.h create mode 100644 plugins/experimental/mysql_remap/lib/iniparser.c create mode 100644 plugins/experimental/mysql_remap/lib/iniparser.h create mode 100644 plugins/experimental/mysql_remap/mysql_remap.cc create mode 100644 plugins/experimental/mysql_remap/sample.ini create mode 100644 plugins/experimental/mysql_remap/schema/import.sql create mode 100644 plugins/experimental/regex_revalidate/Makefile.am create mode 100644 plugins/experimental/regex_revalidate/Makefile.in create mode 100644 plugins/experimental/regex_revalidate/regex_revalidate.c create mode 100644 plugins/experimental/remap_stats/Makefile.am create mode 100644 plugins/experimental/remap_stats/Makefile.in create mode 100644 plugins/experimental/remap_stats/remap_stats.c create mode 100644 plugins/experimental/s3_auth/Makefile.am create mode 100644 plugins/experimental/s3_auth/Makefile.in create mode 100644 plugins/experimental/s3_auth/s3_auth.cc create mode 100644 plugins/experimental/spdy/Makefile.am create mode 100644 plugins/experimental/spdy/TODO create mode 100644 plugins/experimental/spdy/http.cc create mode 100644 plugins/experimental/spdy/http.h create mode 100644 plugins/experimental/spdy/io.cc create mode 100644 plugins/experimental/spdy/io.h create mode 100644 plugins/experimental/spdy/lib/base/atomic.h create mode 100644 plugins/experimental/spdy/lib/base/inet.h create mode 100644 plugins/experimental/spdy/lib/base/logging.cc create mode 100644 plugins/experimental/spdy/lib/base/logging.h create mode 100644 plugins/experimental/spdy/lib/spdy/message.cc 
create mode 100644 plugins/experimental/spdy/lib/spdy/spdy.h create mode 100644 plugins/experimental/spdy/lib/spdy/zstream.cc create mode 100644 plugins/experimental/spdy/lib/spdy/zstream.h create mode 100644 plugins/experimental/spdy/protocol.cc create mode 100644 plugins/experimental/spdy/protocol.h create mode 100644 plugins/experimental/spdy/spdy.cc create mode 100644 plugins/experimental/spdy/stream.cc create mode 100644 plugins/experimental/spdy/strings.cc create mode 100644 plugins/experimental/spdy/tests/stubs.cc create mode 100644 plugins/experimental/spdy/tests/zstream_test.cc create mode 100644 plugins/experimental/stale_while_revalidate/Makefile.am create mode 100644 plugins/experimental/stale_while_revalidate/Makefile.in create mode 100644 plugins/experimental/stale_while_revalidate/Makefile.tsxs create mode 100644 plugins/experimental/stale_while_revalidate/README create mode 100644 plugins/experimental/stale_while_revalidate/stale_while_revalidate.c create mode 100755 plugins/experimental/stale_while_revalidate/test_server.js create mode 100644 plugins/experimental/ts_lua/Makefile.am create mode 100644 plugins/experimental/ts_lua/Makefile.in create mode 100644 plugins/experimental/ts_lua/README.md create mode 100644 plugins/experimental/ts_lua/business/mediaslice.lua create mode 100644 plugins/experimental/ts_lua/business/sethost.lua create mode 100644 plugins/experimental/ts_lua/example/test_cache_lookup.lua create mode 100644 plugins/experimental/ts_lua/example/test_client_socket.lua create mode 100644 plugins/experimental/ts_lua/example/test_closure.lua create mode 100644 plugins/experimental/ts_lua/example/test_creq_method.lua create mode 100644 plugins/experimental/ts_lua/example/test_creq_uri.lua create mode 100644 plugins/experimental/ts_lua/example/test_creq_uri_args.lua create mode 100644 plugins/experimental/ts_lua/example/test_global_hdr.lua create mode 100644 plugins/experimental/ts_lua/example/test_global_hook.lua create mode 100644 
plugins/experimental/ts_lua/example/test_hdr.lua create mode 100644 plugins/experimental/ts_lua/example/test_intercept.lua create mode 100644 plugins/experimental/ts_lua/example/test_post.lua create mode 100644 plugins/experimental/ts_lua/example/test_ret_403.lua create mode 100644 plugins/experimental/ts_lua/example/test_server_intercept.lua create mode 100644 plugins/experimental/ts_lua/example/test_transform.lua create mode 100644 plugins/experimental/ts_lua/example/test_txn_hook.lua create mode 100644 plugins/experimental/ts_lua/ts_lua.c create mode 100644 plugins/experimental/ts_lua/ts_lua_cached_response.c create mode 100644 plugins/experimental/ts_lua/ts_lua_cached_response.h create mode 100644 plugins/experimental/ts_lua/ts_lua_client_request.c create mode 100644 plugins/experimental/ts_lua/ts_lua_client_request.h create mode 100644 plugins/experimental/ts_lua/ts_lua_client_response.c create mode 100644 plugins/experimental/ts_lua/ts_lua_client_response.h create mode 100644 plugins/experimental/ts_lua/ts_lua_common.h create mode 100644 plugins/experimental/ts_lua/ts_lua_context.c create mode 100644 plugins/experimental/ts_lua/ts_lua_context.h create mode 100644 plugins/experimental/ts_lua/ts_lua_crypto.c create mode 100644 plugins/experimental/ts_lua/ts_lua_crypto.h create mode 100644 plugins/experimental/ts_lua/ts_lua_hook.c create mode 100644 plugins/experimental/ts_lua/ts_lua_hook.h create mode 100644 plugins/experimental/ts_lua/ts_lua_http.c create mode 100644 plugins/experimental/ts_lua/ts_lua_http.h create mode 100644 plugins/experimental/ts_lua/ts_lua_http_cntl.c create mode 100644 plugins/experimental/ts_lua/ts_lua_http_cntl.h create mode 100644 plugins/experimental/ts_lua/ts_lua_http_config.c create mode 100644 plugins/experimental/ts_lua/ts_lua_http_config.h create mode 100644 plugins/experimental/ts_lua/ts_lua_http_intercept.c create mode 100644 plugins/experimental/ts_lua/ts_lua_http_intercept.h create mode 100644 
plugins/experimental/ts_lua/ts_lua_log.c create mode 100644 plugins/experimental/ts_lua/ts_lua_log.h create mode 100644 plugins/experimental/ts_lua/ts_lua_mgmt.c create mode 100644 plugins/experimental/ts_lua/ts_lua_mgmt.h create mode 100644 plugins/experimental/ts_lua/ts_lua_misc.c create mode 100644 plugins/experimental/ts_lua/ts_lua_misc.h create mode 100644 plugins/experimental/ts_lua/ts_lua_package.c create mode 100644 plugins/experimental/ts_lua/ts_lua_package.h create mode 100644 plugins/experimental/ts_lua/ts_lua_remap.c create mode 100644 plugins/experimental/ts_lua/ts_lua_remap.h create mode 100644 plugins/experimental/ts_lua/ts_lua_server_request.c create mode 100644 plugins/experimental/ts_lua/ts_lua_server_request.h create mode 100644 plugins/experimental/ts_lua/ts_lua_server_response.c create mode 100644 plugins/experimental/ts_lua/ts_lua_server_response.h create mode 100644 plugins/experimental/ts_lua/ts_lua_string.c create mode 100644 plugins/experimental/ts_lua/ts_lua_string.h create mode 100644 plugins/experimental/ts_lua/ts_lua_transform.c create mode 100644 plugins/experimental/ts_lua/ts_lua_transform.h create mode 100644 plugins/experimental/ts_lua/ts_lua_util.c create mode 100644 plugins/experimental/ts_lua/ts_lua_util.h create mode 100644 plugins/experimental/url_sig/Makefile.am create mode 100644 plugins/experimental/url_sig/Makefile.in create mode 100644 plugins/experimental/url_sig/Makefile.tsxs create mode 100644 plugins/experimental/url_sig/README create mode 100755 plugins/experimental/url_sig/genkeys.pl create mode 100755 plugins/experimental/url_sig/sign.pl create mode 100644 plugins/experimental/url_sig/url_sig.c create mode 100644 plugins/experimental/url_sig/url_sig.h create mode 100644 plugins/experimental/xdebug/Makefile.am create mode 100644 plugins/experimental/xdebug/Makefile.in create mode 100644 plugins/experimental/xdebug/xdebug.cc create mode 100644 plugins/gzip/Makefile.am create mode 100644 plugins/gzip/Makefile.in 
create mode 100644 plugins/gzip/README create mode 100644 plugins/gzip/configuration.cc create mode 100644 plugins/gzip/configuration.h create mode 100644 plugins/gzip/debug_macros.h create mode 100644 plugins/gzip/gzip.cc create mode 100644 plugins/gzip/misc.cc create mode 100644 plugins/gzip/misc.h create mode 100644 plugins/gzip/sample.gzip.config create mode 100644 plugins/header_rewrite/Examples/Force-close create mode 100644 plugins/header_rewrite/Examples/Regression create mode 100644 plugins/header_rewrite/Examples/YCS-EC create mode 100644 plugins/header_rewrite/Makefile.am create mode 100644 plugins/header_rewrite/Makefile.in create mode 100644 plugins/header_rewrite/README create mode 100644 plugins/header_rewrite/TODO create mode 100644 plugins/header_rewrite/condition.cc create mode 100644 plugins/header_rewrite/condition.h create mode 100644 plugins/header_rewrite/conditions.cc create mode 100644 plugins/header_rewrite/conditions.h create mode 100644 plugins/header_rewrite/expander.cc create mode 100644 plugins/header_rewrite/expander.h create mode 100644 plugins/header_rewrite/factory.cc create mode 100644 plugins/header_rewrite/factory.h create mode 100644 plugins/header_rewrite/header_rewrite.cc create mode 100644 plugins/header_rewrite/lulu.cc create mode 100644 plugins/header_rewrite/lulu.h create mode 100644 plugins/header_rewrite/matcher.cc create mode 100644 plugins/header_rewrite/matcher.h create mode 100644 plugins/header_rewrite/operator.cc create mode 100644 plugins/header_rewrite/operator.h create mode 100644 plugins/header_rewrite/operators.cc create mode 100644 plugins/header_rewrite/operators.h create mode 100644 plugins/header_rewrite/parser.cc create mode 100644 plugins/header_rewrite/parser.h create mode 100644 plugins/header_rewrite/regex_helper.cc create mode 100644 plugins/header_rewrite/regex_helper.h create mode 100644 plugins/header_rewrite/resources.cc create mode 100644 plugins/header_rewrite/resources.h create mode 100644 
plugins/header_rewrite/ruleset.cc create mode 100644 plugins/header_rewrite/ruleset.h create mode 100644 plugins/header_rewrite/statement.cc create mode 100644 plugins/header_rewrite/statement.h create mode 100644 plugins/header_rewrite/value.h create mode 100644 plugins/libloader/Makefile.am create mode 100644 plugins/libloader/Makefile.in create mode 100644 plugins/libloader/libloader.c create mode 100644 plugins/regex_remap/Makefile.am create mode 100644 plugins/regex_remap/Makefile.in create mode 100644 plugins/regex_remap/README create mode 100644 plugins/regex_remap/regex_remap.cc create mode 100644 plugins/stats_over_http/Makefile.am create mode 100644 plugins/stats_over_http/Makefile.in create mode 100644 plugins/stats_over_http/README create mode 100644 plugins/stats_over_http/stats_over_http.c create mode 100644 plugins/tcpinfo/Makefile.am create mode 100644 plugins/tcpinfo/Makefile.in create mode 100644 plugins/tcpinfo/tcpinfo.cc create mode 100644 proxy/AbstractBuffer.cc create mode 100644 proxy/AbstractBuffer.h create mode 100644 proxy/CacheControl.cc create mode 100644 proxy/CacheControl.h create mode 100644 proxy/ClassH.txt create mode 100644 proxy/CompletionUtil.h create mode 100644 proxy/ConfigParse.h create mode 100644 proxy/ControlBase.cc create mode 100644 proxy/ControlBase.h create mode 100644 proxy/ControlMatcher.cc create mode 100644 proxy/ControlMatcher.h create mode 100644 proxy/CoreUtils.cc create mode 100644 proxy/CoreUtils.h create mode 100644 proxy/DebugStreamLevels.txt create mode 100644 proxy/DynamicStats.h create mode 100644 proxy/EventName.cc create mode 100644 proxy/EventName.h create mode 100644 proxy/FetchSM.cc create mode 100644 proxy/FetchSM.h create mode 100644 proxy/HttpTransStats.h create mode 100644 proxy/ICP.cc create mode 100644 proxy/ICP.h create mode 100644 proxy/ICPConfig.cc create mode 100644 proxy/ICPProcessor.cc create mode 100644 proxy/ICPProcessor.h create mode 100644 proxy/ICPStats.cc create mode 100644 
proxy/ICPevents.h create mode 100644 proxy/ICPlog.h create mode 100644 proxy/IPAllow.cc create mode 100644 proxy/IPAllow.h create mode 100644 proxy/InkAPI-ensure-funcs-present.pl create mode 100644 proxy/InkAPI.cc create mode 100644 proxy/InkAPIInternal.h create mode 100644 proxy/InkAPITest.cc create mode 100644 proxy/InkAPITestTool.cc create mode 100644 proxy/InkIOCoreAPI.cc create mode 100644 proxy/InkPool_r.h create mode 100644 proxy/Main.cc create mode 100644 proxy/Main.h create mode 100644 proxy/Makefile.am create mode 100644 proxy/Makefile.in create mode 100644 proxy/ParentSelection.cc create mode 100644 proxy/ParentSelection.h create mode 100644 proxy/Plugin.cc create mode 100644 proxy/Plugin.h create mode 100644 proxy/PluginVC.cc create mode 100644 proxy/PluginVC.h create mode 100644 proxy/Prefetch.cc create mode 100644 proxy/Prefetch.h create mode 100644 proxy/ProtoSM.h create mode 100644 proxy/ProtocolProbeSessionAccept.cc create mode 100644 proxy/ProtocolProbeSessionAccept.h create mode 100644 proxy/README-stats.otl create mode 100644 proxy/RegressionSM.cc create mode 100644 proxy/RegressionSM.h create mode 100644 proxy/ReverseProxy.cc create mode 100644 proxy/ReverseProxy.h create mode 100644 proxy/Show.h create mode 100644 proxy/SocksProxy.cc create mode 100644 proxy/StatPages.cc create mode 100644 proxy/StatPages.h create mode 100644 proxy/StatSystem.cc create mode 100644 proxy/StatSystem.h create mode 100644 proxy/StufferUdpReceiver.cc create mode 100644 proxy/TestClock.cc create mode 100644 proxy/TestClusterHash.cc create mode 100644 proxy/TestDNS.cc create mode 100644 proxy/TestPreProc.cc create mode 100644 proxy/TestPreProc.h create mode 100644 proxy/TestProxy.cc create mode 100644 proxy/TestRegex.cc create mode 100644 proxy/TestSimpleProxy.cc create mode 100644 proxy/TimeTrace.h create mode 100644 proxy/Transform.cc create mode 100644 proxy/Transform.h create mode 100644 proxy/TransformInternal.h create mode 100644 proxy/UDPAPIClientTest.cc 
create mode 100644 proxy/UDPAPIClientTest.h create mode 100644 proxy/UDPAPITest.cc create mode 100644 proxy/UDPAPITest.h create mode 100644 proxy/UnixCompletionUtil.h create mode 100644 proxy/Update.cc create mode 100644 proxy/Update.h create mode 100644 proxy/UserNameCacheTest.h create mode 100644 proxy/api/ts/InkAPIPrivateIOCore.h create mode 100644 proxy/api/ts/Makefile.am create mode 100644 proxy/api/ts/Makefile.in create mode 100644 proxy/api/ts/TsException.h create mode 100644 proxy/api/ts/experimental.h create mode 100644 proxy/api/ts/remap.h create mode 100644 proxy/api/ts/ts.h create mode 100644 proxy/config/Makefile.am create mode 100644 proxy/config/Makefile.in create mode 100644 proxy/config/body_factory/Makefile.am create mode 100644 proxy/config/body_factory/Makefile.in create mode 100644 proxy/config/body_factory/default/.body_factory_info create mode 100644 proxy/config/body_factory/default/Makefile.am create mode 100644 proxy/config/body_factory/default/Makefile.in create mode 100644 proxy/config/body_factory/default/README create mode 100644 proxy/config/body_factory/default/access#denied create mode 100644 proxy/config/body_factory/default/access#proxy_auth_required create mode 100644 proxy/config/body_factory/default/access#redirect_url create mode 100644 proxy/config/body_factory/default/access#ssl_forbidden create mode 100644 proxy/config/body_factory/default/cache#not_in_cache create mode 100644 proxy/config/body_factory/default/cache#read_error create mode 100644 proxy/config/body_factory/default/congestion#retryAfter create mode 100644 proxy/config/body_factory/default/connect#dns_failed create mode 100644 proxy/config/body_factory/default/connect#failed_connect create mode 100644 proxy/config/body_factory/default/connect#hangup create mode 100644 proxy/config/body_factory/default/default create mode 100644 proxy/config/body_factory/default/interception#no_host create mode 100644 proxy/config/body_factory/default/redirect#moved_permanently 
create mode 100644 proxy/config/body_factory/default/redirect#moved_temporarily create mode 100644 proxy/config/body_factory/default/request#cycle_detected create mode 100644 proxy/config/body_factory/default/request#invalid_content_length create mode 100644 proxy/config/body_factory/default/request#no_content_length create mode 100644 proxy/config/body_factory/default/request#no_host create mode 100644 proxy/config/body_factory/default/request#scheme_unsupported create mode 100644 proxy/config/body_factory/default/request#syntax_error create mode 100644 proxy/config/body_factory/default/response#bad_response create mode 100644 proxy/config/body_factory/default/response#bad_version create mode 100644 proxy/config/body_factory/default/timeout#activity create mode 100644 proxy/config/body_factory/default/timeout#inactivity create mode 100644 proxy/config/body_factory/default/transcoding#unsupported create mode 100644 proxy/config/body_factory/default/urlrouting#no_mapping create mode 100644 proxy/config/cache.config.default create mode 100644 proxy/config/cluster.config.default create mode 100644 proxy/config/congestion.config.default create mode 100644 proxy/config/hosting.config.default create mode 100644 proxy/config/icp.config.default create mode 100644 proxy/config/ip_allow.config.default create mode 100644 proxy/config/log_hosts.config.default create mode 100644 proxy/config/logs_xml.config.default create mode 100644 proxy/config/parent.config.default create mode 100644 proxy/config/plugin.config.default create mode 100644 proxy/config/prefetch.config.default create mode 100644 proxy/config/public_key.der create mode 100644 proxy/config/records.config.default.in create mode 100644 proxy/config/remap.config.default create mode 100644 proxy/config/socks.config.default create mode 100644 proxy/config/splitdns.config.default create mode 100644 proxy/config/ssl_multicert.config.default create mode 100644 proxy/config/stats.config.dtd create mode 100644 
proxy/config/stats.config.xml.default create mode 100644 proxy/config/storage.config.default.in create mode 100644 proxy/config/throttle_error.html.example create mode 100644 proxy/config/update.config.default create mode 100644 proxy/config/vaddrs.config.default create mode 100644 proxy/config/volume.config.default create mode 100644 proxy/congest/Congestion.cc create mode 100644 proxy/congest/Congestion.h create mode 100644 proxy/congest/CongestionDB.cc create mode 100644 proxy/congest/CongestionDB.h create mode 100644 proxy/congest/CongestionStats.cc create mode 100644 proxy/congest/CongestionStats.h create mode 100644 proxy/congest/CongestionTest.cc create mode 100644 proxy/congest/FeatureSpec.txt create mode 100644 proxy/congest/MT_hashtable.h create mode 100644 proxy/congest/Makefile.am create mode 100644 proxy/congest/Makefile.in create mode 100644 proxy/example_alarm_bin.sh create mode 100644 proxy/example_prep.sh create mode 100644 proxy/hdrs/HTTP.cc create mode 100644 proxy/hdrs/HTTP.h create mode 100644 proxy/hdrs/HdrHeap.cc create mode 100644 proxy/hdrs/HdrHeap.h create mode 100644 proxy/hdrs/HdrTSOnly.cc create mode 100644 proxy/hdrs/HdrTest.cc create mode 100644 proxy/hdrs/HdrTest.h create mode 100644 proxy/hdrs/HdrToken.cc create mode 100644 proxy/hdrs/HdrToken.h create mode 100644 proxy/hdrs/HdrUtils.cc create mode 100644 proxy/hdrs/HdrUtils.h create mode 100644 proxy/hdrs/HttpCompat.cc create mode 100644 proxy/hdrs/HttpCompat.h create mode 100644 proxy/hdrs/MIME.cc create mode 100644 proxy/hdrs/MIME.h create mode 100644 proxy/hdrs/Makefile.am create mode 100644 proxy/hdrs/Makefile.in create mode 100644 proxy/hdrs/URL.cc create mode 100644 proxy/hdrs/URL.h create mode 100644 proxy/hdrs/load_http_hdr.cc create mode 100644 proxy/hdrs/test_header.cc create mode 100644 proxy/hdrs/test_urlhash.cc create mode 100644 proxy/http/HttpBodyFactory.cc create mode 100644 proxy/http/HttpBodyFactory.h create mode 100644 proxy/http/HttpCacheSM.cc create mode 100644 
proxy/http/HttpCacheSM.h create mode 100644 proxy/http/HttpClientSession.cc create mode 100644 proxy/http/HttpClientSession.h create mode 100644 proxy/http/HttpConfig.cc create mode 100644 proxy/http/HttpConfig.h create mode 100644 proxy/http/HttpConnectionCount.cc create mode 100644 proxy/http/HttpConnectionCount.h create mode 100644 proxy/http/HttpDebugNames.cc create mode 100644 proxy/http/HttpDebugNames.h create mode 100644 proxy/http/HttpPages.cc create mode 100644 proxy/http/HttpPages.h create mode 100644 proxy/http/HttpProxyAPIEnums.h create mode 100644 proxy/http/HttpProxyServerMain.cc create mode 100644 proxy/http/HttpProxyServerMain.h create mode 100644 proxy/http/HttpSM.cc create mode 100644 proxy/http/HttpSM.h create mode 100644 proxy/http/HttpServerSession.cc create mode 100644 proxy/http/HttpServerSession.h create mode 100644 proxy/http/HttpSessionAccept.cc create mode 100644 proxy/http/HttpSessionAccept.h create mode 100644 proxy/http/HttpSessionManager.cc create mode 100644 proxy/http/HttpSessionManager.h create mode 100644 proxy/http/HttpTransact.cc create mode 100644 proxy/http/HttpTransact.h create mode 100644 proxy/http/HttpTransactCache.cc create mode 100644 proxy/http/HttpTransactCache.h create mode 100644 proxy/http/HttpTransactHeaders.cc create mode 100644 proxy/http/HttpTransactHeaders.h create mode 100644 proxy/http/HttpTunnel.cc create mode 100644 proxy/http/HttpTunnel.h create mode 100644 proxy/http/HttpUpdateSM.cc create mode 100644 proxy/http/HttpUpdateSM.h create mode 100644 proxy/http/HttpUpdateTester.cc create mode 100644 proxy/http/Makefile.am create mode 100644 proxy/http/Makefile.in create mode 100644 proxy/http/README.via create mode 100644 proxy/http/RegressionHttpTransact.cc create mode 100644 proxy/http/TestHttpTransact.cc create mode 100644 proxy/http/TestUrl.cc create mode 100644 proxy/http/remap/AclFiltering.cc create mode 100644 proxy/http/remap/AclFiltering.h create mode 100644 proxy/http/remap/Makefile.am create mode 
100644 proxy/http/remap/Makefile.in create mode 100644 proxy/http/remap/RemapConfig.cc create mode 100644 proxy/http/remap/RemapConfig.h create mode 100644 proxy/http/remap/RemapPluginInfo.cc create mode 100644 proxy/http/remap/RemapPluginInfo.h create mode 100644 proxy/http/remap/RemapPlugins.cc create mode 100644 proxy/http/remap/RemapPlugins.h create mode 100644 proxy/http/remap/RemapProcessor.cc create mode 100644 proxy/http/remap/RemapProcessor.h create mode 100644 proxy/http/remap/UrlMapping.cc create mode 100644 proxy/http/remap/UrlMapping.h create mode 100644 proxy/http/remap/UrlMappingPathIndex.cc create mode 100644 proxy/http/remap/UrlMappingPathIndex.h create mode 100644 proxy/http/remap/UrlRewrite.cc create mode 100644 proxy/http/remap/UrlRewrite.h create mode 100644 proxy/http/stats.memo create mode 100644 proxy/http/test_http_client.pl create mode 100644 proxy/http/test_proxy.pl create mode 100644 proxy/http/test_socket_close.cc create mode 100644 proxy/http/testheaders.cc create mode 100644 proxy/ink_icon.ico create mode 100644 proxy/issues.txt create mode 100644 proxy/logcat.cc create mode 100644 proxy/logging/Log.cc create mode 100644 proxy/logging/Log.h create mode 100644 proxy/logging/LogAccess.cc create mode 100644 proxy/logging/LogAccess.h create mode 100644 proxy/logging/LogAccessHttp.cc create mode 100644 proxy/logging/LogAccessHttp.h create mode 100644 proxy/logging/LogAccessICP.cc create mode 100644 proxy/logging/LogAccessICP.h create mode 100644 proxy/logging/LogAccessTest.cc create mode 100644 proxy/logging/LogAccessTest.h create mode 100644 proxy/logging/LogBuffer.cc create mode 100644 proxy/logging/LogBuffer.h create mode 100644 proxy/logging/LogBufferSink.h create mode 100644 proxy/logging/LogCollationAccept.cc create mode 100644 proxy/logging/LogCollationAccept.h create mode 100644 proxy/logging/LogCollationBase.h create mode 100644 proxy/logging/LogCollationClientSM.cc create mode 100644 proxy/logging/LogCollationClientSM.h create 
mode 100644 proxy/logging/LogCollationHostSM.cc create mode 100644 proxy/logging/LogCollationHostSM.h create mode 100644 proxy/logging/LogConfig.cc create mode 100644 proxy/logging/LogConfig.h create mode 100644 proxy/logging/LogField.cc create mode 100644 proxy/logging/LogField.h create mode 100644 proxy/logging/LogFieldAliasMap.cc create mode 100644 proxy/logging/LogFieldAliasMap.h create mode 100644 proxy/logging/LogFile.cc create mode 100644 proxy/logging/LogFile.h create mode 100644 proxy/logging/LogFilter.cc create mode 100644 proxy/logging/LogFilter.h create mode 100644 proxy/logging/LogFormat.cc create mode 100644 proxy/logging/LogFormat.h create mode 100644 proxy/logging/LogHost.cc create mode 100644 proxy/logging/LogHost.h create mode 100644 proxy/logging/LogLimits.h create mode 100644 proxy/logging/LogObject.cc create mode 100644 proxy/logging/LogObject.h create mode 100644 proxy/logging/LogPredefined.cc create mode 100644 proxy/logging/LogPredefined.h create mode 100644 proxy/logging/LogSock.cc create mode 100644 proxy/logging/LogSock.h create mode 100644 proxy/logging/LogStandalone.cc create mode 100644 proxy/logging/LogUtils.cc create mode 100644 proxy/logging/LogUtils.h create mode 100644 proxy/logging/Makefile.am create mode 100644 proxy/logging/Makefile.in create mode 100644 proxy/logstats.cc create mode 100644 proxy/regression.pl create mode 100644 proxy/sac.cc create mode 100644 proxy/shared/DiagsConfig.cc create mode 100644 proxy/shared/DiagsConfig.h create mode 100644 proxy/shared/Error.cc create mode 100644 proxy/shared/Error.h create mode 100644 proxy/shared/InkXml.cc create mode 100644 proxy/shared/InkXml.h create mode 100644 proxy/shared/Makefile.am create mode 100644 proxy/shared/Makefile.in create mode 100644 proxy/shared/UglyLogStubs.cc create mode 100644 proxy/shared/signals.cc create mode 100644 proxy/shared/signals.h create mode 100644 proxy/spdy/Makefile.am create mode 100644 proxy/spdy/Makefile.in create mode 100644 
proxy/spdy/SpdyCallbacks.cc create mode 100644 proxy/spdy/SpdyCallbacks.h create mode 100644 proxy/spdy/SpdyClientSession.cc create mode 100644 proxy/spdy/SpdyClientSession.h create mode 100644 proxy/spdy/SpdyCommon.cc create mode 100644 proxy/spdy/SpdyCommon.h create mode 100644 proxy/spdy/SpdyDefs.h create mode 100644 proxy/spdy/SpdySessionAccept.cc create mode 100644 proxy/spdy/SpdySessionAccept.h create mode 100644 proxy/test_xml_parser.cc create mode 100644 proxy/tests/logstats.blog create mode 100644 proxy/tests/logstats.json create mode 100644 proxy/tests/logstats.summary create mode 100755 proxy/tests/test_logstats_json create mode 100755 proxy/tests/test_logstats_summary create mode 100644 rc/Makefile.am create mode 100644 rc/Makefile.in create mode 100644 rc/solaris.txt create mode 100644 rc/start_traffic_shell create mode 100644 rc/trafficserver.conf.in create mode 100644 rc/trafficserver.in create mode 100644 rc/trafficserver.service.in create mode 100644 rc/trafficserver.xml.in create mode 100644 tools/Makefile.am create mode 100644 tools/Makefile.in create mode 100755 tools/apichecker.pl create mode 100644 tools/http_load/README create mode 100644 tools/http_load/http_load.c create mode 100644 tools/http_load/merge_stats.pl create mode 100644 tools/http_load/port.h create mode 100644 tools/http_load/timers.c create mode 100644 tools/http_load/timers.h create mode 100644 tools/jtest/README create mode 100644 tools/jtest/README.zh.md create mode 100644 tools/jtest/jtest.cc create mode 100644 tools/lighttpd_mod_generator/Makefile create mode 100644 tools/lighttpd_mod_generator/README create mode 100755 tools/lighttpd_mod_generator/ats_lighttpd_benchmark create mode 100644 tools/lighttpd_mod_generator/conf/lighttpd.conf create mode 100644 tools/lighttpd_mod_generator/conf/lighttpd_1.conf create mode 100644 tools/lighttpd_mod_generator/conf/lighttpd_10.conf create mode 100644 tools/lighttpd_mod_generator/conf/lighttpd_11.conf create mode 100644 
tools/lighttpd_mod_generator/conf/lighttpd_12.conf create mode 100644 tools/lighttpd_mod_generator/conf/lighttpd_13.conf create mode 100644 tools/lighttpd_mod_generator/conf/lighttpd_14.conf create mode 100644 tools/lighttpd_mod_generator/conf/lighttpd_15.conf create mode 100644 tools/lighttpd_mod_generator/conf/lighttpd_16.conf create mode 100644 tools/lighttpd_mod_generator/conf/lighttpd_17.conf create mode 100644 tools/lighttpd_mod_generator/conf/lighttpd_18.conf create mode 100644 tools/lighttpd_mod_generator/conf/lighttpd_19.conf create mode 100644 tools/lighttpd_mod_generator/conf/lighttpd_2.conf create mode 100644 tools/lighttpd_mod_generator/conf/lighttpd_20.conf create mode 100644 tools/lighttpd_mod_generator/conf/lighttpd_21.conf create mode 100644 tools/lighttpd_mod_generator/conf/lighttpd_22.conf create mode 100644 tools/lighttpd_mod_generator/conf/lighttpd_23.conf create mode 100644 tools/lighttpd_mod_generator/conf/lighttpd_24.conf create mode 100644 tools/lighttpd_mod_generator/conf/lighttpd_3.conf create mode 100644 tools/lighttpd_mod_generator/conf/lighttpd_4.conf create mode 100644 tools/lighttpd_mod_generator/conf/lighttpd_5.conf create mode 100644 tools/lighttpd_mod_generator/conf/lighttpd_6.conf create mode 100644 tools/lighttpd_mod_generator/conf/lighttpd_7.conf create mode 100644 tools/lighttpd_mod_generator/conf/lighttpd_8.conf create mode 100644 tools/lighttpd_mod_generator/conf/lighttpd_9.conf create mode 100644 tools/lighttpd_mod_generator/mod_generator.c create mode 100755 tools/reports/commits_since.sh create mode 100755 tools/traffic_shell.pl create mode 100755 tools/tspush create mode 100755 tools/tsxs.in diff --git a/.clang-format b/.clang-format new file mode 100644 index 00000000..df89c8d9 --- /dev/null +++ b/.clang-format @@ -0,0 +1,25 @@ +# BasedOnStyle: LLVM +AccessModifierOffset: -2 +AlignEscapedNewlinesLeft: true +AllowAllParametersOfDeclarationOnNextLine: false +AllowShortIfStatementsOnASingleLine: false 
+AllowShortLoopsOnASingleLine: false +AlwaysBreakTemplateDeclarations: false +BinPackParameters: true +ColumnLimit: 120 +ConstructorInitializerAllOnOneLineOrOnePerLine: true +DerivePointerBinding: false +IndentCaseLabels: false +MaxEmptyLinesToKeep: 2 +ObjCSpaceBeforeProtocolList: true +PenaltyBreakComment: 45 +PenaltyBreakString: 1000 +PenaltyExcessCharacter: 1000000 +PenaltyReturnTypeOnItsOwnLine: 0 +PointerBindsToType: true +SpacesBeforeTrailingComments: 4 +SpacesInBracedLists: true +Standard: C++03 +IndentWidth: 2 +UseTab: false +BreakBeforeBraces: Linux diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 00000000..2111b7d6 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "lib/luajit"] + path = lib/luajit + url = http://luajit.org/git/luajit-2.0.git diff --git a/.indent.pro b/.indent.pro new file mode 100644 index 00000000..b0c23ca8 --- /dev/null +++ b/.indent.pro @@ -0,0 +1,29 @@ +-nut +-nbad +-bap +-nbbo +-nbc +-br +-bls +-ce +-ci2 +-cli0 +-cs +-d0 +-di2 +-nfc1 +-nfca +-hnl +-i2 +-ip0 +-l120 +-lp +-npcs +-nprs +-psl +-saf +-sai +-saw +-nsc +-nsob +-nss diff --git a/.vimrc b/.vimrc new file mode 100644 index 00000000..b810920d --- /dev/null +++ b/.vimrc @@ -0,0 +1,7 @@ +" Basic whitespace rules. If you use vim, set exrc and set secure +" to have it automatically source this file. +set tabstop=2 +set softtabstop=2 +set shiftwidth=2 +set expandtab +set textwidth=120 diff --git a/CHANGES b/CHANGES new file mode 100644 index 00000000..473c9f14 --- /dev/null +++ b/CHANGES @@ -0,0 +1,3744 @@ + -*- coding: utf-8 -*- +Changes with Apache Traffic Server 5.0.1 + *) [TS-2055] limit access to synthetic healthcheck + + +Changes with Apache Traffic Server 5.0.0 + + *) [TS-2885] Change LuaJIT to build statically, and add --disable-luajit. This + is an option that will eventually be removed as we make LuaJIT mandatory. + + *) [TS-2886] Fix regression where body factory is not used. 
+ + *) [TS-2881] Redirection handling broken + + *) [TS-2875] Change hwloc library env variables from hwloc_ to HWLOC_. + + *) [TS-2874] Make some of the SPDY metrics persistent, and also fix how we + measure transaction time / total streams. + + *) [TS-2872] Can't set "Send Window" for SPDY/3.1 > 64k. Code mostly + provided by our SPDY brainiac, Geffon. + + *) [TS-2873] Cleanup SPDY metrics and configs, also make it use the common + pattern for stats increments/decrements (even though it's ugly, sorry amc). + + *) [TS-2870] Update SPDY defaults to better match our other defaults. + + *) [TS-2868] Error setting HSTS max age with traffic_line + + *) [TS-2776] Core dump inside openssl library + + *) [TS-2845] Commit e6b9cb533 causes problems with stats_over_http + + *) [TS-1981] Url remap and IPAllow method filtering is broken with non-wks method + and add support for arbitrary methods + + *) [TS-2865] Fix warnings in collapsed connection plugin + + *) [TS-2580] SSL Connection reset by peer errors in 4.2.0-rc0 + + *) [TS-2783] Update documentation defaults, and fix RecordsConfig.cc. + + *) [TS-2792] Large request header causes unexpected remap. + Author: Masakazu Kitajo + + *) [TS-2834] header_rewrite: Add Internal transaction and client-IP + conditions. + + *) [TS-2855] Add the TSHttpIsInternalSession API. + + *) [TS-2859] Remove DBG macros to not generate warnings from GCC 4.9. + + *) [TS-2858] Build failures on OmniOS. Also add some LuaJIT flags as per + Theo's and Daniel's recommendations. + + *) [TS-2857] Cleanup SPDY and SSL stat names + + *) [TS-2856] Remove proxy.config.spdy.verbose_in and use diags instead + + *) [TS-2391] Traffic Server tries to reverse resolve 127.0.0.1 + + *) [TS-2837] Dangling pointer in URLImpl which may cause core dump + + *) [TS-2842] Can't set SPDY inactivity timeout with traffic_line + + *) [TS-2618] IOBufferBlock::realloc()'s bounds check is wrong. + + *) [TS-2850] Fix logging of response header length. 
+ + *) [TS-2839] tsxs does not work on OSX (Darwin). + Author: Masakazu Kitajo + + *) [TS-2820] Add check to Transformations in C++ api to prevent closing + connections twice. + + *) [TS-2804] Add regex_revalidate plugin to explerimental plugins directory. + + *) [TS-2528] Use in the public mgmtapi.h interface. This follows + the C99 standard, and we should move other public APIs to it. + + *) [TS-2428] Move P_Freer.h continuations to run on ET_TASK (if available). + + *) [TS-2344] 404 error was logged while url redirect request was processed + correctly. + + *) [TS-2753] Add more SPDY and HTTPS statistics. + + *) [TS-2677] Don't apply path / scheme URL changes in remap when method is + CONNECT. + + *) [TS-2308] includedir in config.layout is not used. + + *) [TS-2527] mgmtapi.h should be C style. This is slightly ugly in the core + now, but that public struct/union has to be named to be C99 compliant. + + *) [TS-2838] Add logging fields for plugins to TS connect API. Use for SPDY. + + *) [TS-2833] Remove REC_BUILD_STAND_ALONE, REC_BUILD_MGMT and + REC_BUILD_STUB. Dead code. + + *) [TS-2547] Remove Resource.c/h, and NEW as well. + + *) [TS-1588] slow log should include client addr + + *) [TS-2728] The lib/perl Makefile.am does not properly detect in-source + builds, generating errors. + + *) [TS-2723] add new features to ts_lua plugin. + Author: Quehan + + *) [TS-2816] Bump the minor cache version for ATS 5.0 release + + *) [TS-2764] Remove when_to_add_no_cache_to_msie_requests configuration. + + *) [TS-2737] Rename rfc5861 plugin to stale_while_revalidate. + + *) [TS-2400] Our default SSL cipher-suite advocates speed over security + + *) [TS-2818] TSHttpTxnServerAddrSet() doesn't update the server port + + *) [TS-2793] Remove UnixNetVConnection::selected_next_protocol. + + *) [TS-2831] Add SPDY stream count statistic. + + *) [TS-1665] Remove the old traffic_shell (R.I.P.). + + *) [TS-2830] Make SPDY configurable. 
+ + *) [TS-2684] Add a text-log format to background_fetch plugin. + + *) [TS-2577] Tracing on e.g. -T http_hdrs does not show Proxy Request + headers accurately. Author: Masakazu Kitajo + + *) [TS-1486] Drop support for Sun Studio compilers. + + *) [TS-2765] Memory Leak in SSLConfig initialization + + *) [TS-1125] POST's with Expect: 100-continue are slowed by delayed 100 + response + + *) [TS-2826] SPDY implementation does not support OPTIONS and TRACE methods + + *) [TS-2274] Minimize the default records.config, and also fix a number of + inconsistencies and missing configs in RecordsConfig.cc. This is as per + discussions on IRC etc. I have verified that traffic_line -m reports the + same internal values for all configs before and after this change. In + addition, the following defaults are changing: + + - proxy.config.url_remap.pristine_host_hdr -> 0 + - proxy.config.http.normalize_ae_gzip -> 1 + - proxy.config.http.cache.ignore_client_cc_max_age -> 1 + - proxy.config.http.background_fill_active_timeout -> 0 + - proxy.config.http.background_fill_completed_threshold -> 0.0 + - proxy.config.cache.enable_read_while_writer -> 1 + - proxy.config.net.sock_send_buffer_size_in -> 0 + - proxy.config.cache.permit.pinning -> 1 + - proxy.config.diags.show_location -> 1 + - proxy.config.dns.round_robin_nameservers -> 1 + - proxy.config.http.connect_ports -> 443 + - proxy.config.log.custom_logs_enabled -> 1 + - proxy.config.spdy.client.max_concurrent_streams -> 100 + - proxy.config.env_prep -> NULL + + *) [TS-2824] Revert TS-2592. + + *) [TS-2632] Do not lock the object in cache (by default) on Range + requests. This adds proxy.config.http.cache.range.write. + + *) [TS-2822] Crash in LogBufferIterator::next + + *) [TS-2705] Make the background fill check more robust. 
+ + *) [TS-1411] Seg fault when using % + + *) [TS-2739] ATS doesn't send back Transfer-Encoding when client keep-alive + is turned off + + *) [TS-2253] PluginVC::process_close Segmentation fault + + *) [TS-2823] Change the Mgmt API names TSEvent and TSError to avoid name + collisions with the plugin API. + + *) [TS-2029] Eliminate CacheHttpHdr argument from Cache::generate_key(). + + *) [TS-2821] Added configuration for max concurrent streams for SPDY. + + *) [TS-2558] Remove check for inbound transparency vs. SSL. + + *) [TS-2810] Add the TSVConnFdCreate API. + + *) [TS-2342] Problem with cache.cache_responses_to_cookies value 0. + Author: Paul Marquess + + *) [TS-2751] Remove the ProtocolNetAccept layer. + + *) [TS-2815] SSL orgin server connection hangs if ssl handshake is slow + + *) [TS-2788] Make proxy.config.alarm.bin reloadable. + + *) [TS-2811] Error logged in regex_remap when lowercase_substitutions option + is used. + + *) [TS-2791] SPDY POST transactions failing with ERR_CLIENT_ABORT. + + *) [TS-2805] Client connections are connecting with SPDY 3 instead of 3.1. + + *) [TS-2619] Changed TSRecordDump declaration from TSRecordType to int to + accommodate bit-masks. Also changed TSRecordType enums to hexidecimal, as + this is easier to read for bit arguments. + + *) [TS-2797] Build all manual pages in the doc/reference/api directory. + + *) [TS-2733] Do not build the old SPDY plugin. + + *) [TS-2716] Fix indentation for ts_lua plugin. + + *) [TS-2789] Typo in HttpSessionManger would cause ATS reuse wrong session + to origin server. + + *) [TS-2636] Enhance ATS custom logging to support WIPE_FIELD_VALUE filter + action. + + *) [TS-2786, TS-2784] Make Lua and header_rewrite plugins compile on FBSD. + + *) [TS-1375] Setting default inactivity timeout, if one is not set, to 1 + day. + + *) [TS-2780] Core dump in SpdyRequest::clear() in production testing of + SPDY. + + *) [TS-2744] Remove TSNetAcceptNamedProtocol assertion for unknown + protocols. 
+ + *) [TS-898] Stop the esi plugin referencing invalidated strings. + + *) [TS-2778] Websockets remap doesn't properly handle the implicit port for + wss. + + *) [TS-2774] TS::AdminClient.pm's get_stat API broken in 5.0. + + *) [TS-2766] HdrHeap::coalesce_str_heaps doesn't properly calculate new heap + size. + + *) [TS-2767] ATS Memory Leak related to SPDY. + + *) [TS-2757] Fix logging crashes on reconfiguration. + + *) [TS-2770] Allow proxy.config.log.rolling_interval_sec to be as low as 60sec. + + *) [TS-2772] Clean up mgmt/preparse code. + + *) [TS-2763] Remove the unused proxy.config.log.xuid_logging_enabled config + setting. + + *) [TS-2762] Core dump when benchmarking SPDY. + + *) [TS-2755] Document TSTextLogObjectRollingEnabledSet. + + *) [TS-2746] Rename session accept layer with a standard convention. + + *) [TS-2754] Emit a tcpinfo log on TS_EVENT_HTTP_TXN_CLOSE. + + *) [TS-2760] Add TSFetchClientProtoStackSet/Get() API in experimental.h. + + *) [TS-2743] Ignore invalid HTTP headers in SpdyNV carefully. + + *) [TS-2742] TSFetchCreate should accept both IPv4 and IPv6. + + *) [TS-2736] Add config option to set the max open files limit for the + traffic_server process to some percentage of the fs.file-max proc value on + Linux. The default is 90%. + + *) [TS-2741] Add a server intercept example plugins and documentation. + + *) [TS-2616] Sanitize duplicate Transfer-Encoding: chunked headers. + Author: Dimitry Andric + + *) [TS-645] Remove broken hard restart from traffic_shell. + + *) [TS-2562] Improve init script support for Debian/Ubuntu. + Author: Tomasz Kuzemko + + *) [TS-2735] Align all memory during freelist allocations. + + *) [TS-2120] remove stale_while_revalidate plugin. + + *) [TS-2732] Add url_sign (Signed URL's) plugin. + Author: Jan van Doorn + + *) [TS-2650] Redirect handling enhancements in ATS. 
+ + *) [TS-2548] Add client IP to SSLError() calls in SSLNetVConnection + + *) [TS-2710] ATS serves the wrong cert because it matches wildcard certs + incorrectly. + + *) [TS-2704] Core dump in state_raw_http_server_open getting EVENT_INTERVAL + event. + + *) [TS-2720] Fix bug with request transformations in C++ api + + *) [TS-2714] Promote the tcp_info plugin to stable as 'tcpinfo'. + + *) [TS-2708] Refactor and modernize the tcp_info plugin. + + *) [TS-2555] Adding global plugin support to ts_lua plugin. + + *) [TS-2717] header-rewrite set-redirect not working. + Author: Igor Brezac + + *) [TS-2715] Fix some ESI compile warnings, change the constants to + be part of an include. + + *) [TS-898] Remove pointless NULL check on address of array. + + *) [TS-898] Avoid passing -1 to close (2) in traffic_cop. + + *) [TS-898] Fix minor regex_remap parsing bug. + + *) [TS-2711] Include Lua JIT as a Submodule. + + *) [TS-898] Ensure the cache span probe always closes file descriptors. + + *) [TS-2706] Replace the tcp_info plugin config file with options. + + *) [TS-2691] Fix how we count redirect retries in the core and APIs. + + *) [TS-2693] Deprecate the old TSRedirectUrlSet()/TSRedirectUrlGet(). + + *) [TS-2692] Add an API to Get the current redirection retry count, + TSHttpTxnRedirectRetries(). + + *) [TS-2707] Migrate TSRedirectUrlSet/Get to TSHttpTxnRedirectUrlSet/Get. + + *) [TS-2690] Fixes and improvements for the escalate plugin. + + *) [TS-2699] Add TSClientProtoStackCreate API. + + *) [TS-2678] Some sites (e.g. craigslist) fails to load due to + incorrect truncated response detection. + + *) [TS-2603] Do not update HostDB for server intercept requests. + Author: Quehan + + *) [TS-2622] Optimize the ink_cluster_time() function. + Author: Yu Qing + + *) [TS-2701] Silence traffic_cop logging on chown of missing lock files. + + *) [TS-2656 Determine server connection scheme immediately before + connecting. 
Author: Ron Barber + + *) [TS-2578] Close the client connection when you close TransformTerminus. + Author: Jack Bates + + *) [TS-2657] Eliminate TSHttpTxnSetHttpRetBody() and improve upon + TSHttpTxnErrorBodySet(). This also reduces the size of the HttpSM by 25%. + + *) [TS-2589] Don't hold up the response until the server starts sending + content. Author: Jack Bates + + *) [TS-2687] Support for MIPS platform. + Author: Dejan Latinovic + + *) [TS-2688] atscppapi: new example plugin Boom. + Author: Omer Shapira + + *) [TS-2686] Change background_fetch to defer cacheability check until + SEND_RESPONSE_HDR hook. This allows for other READ_REQUEST_HDR hooks to + modify the response header before we evaluate. + + *) [TS-2679] background_fetch plugin can use uninitialized cont pointer. + + *) [TS-2675] metalink: Fix crash and plug memory leaks. + Author: Jack Bates + + *) [TS-2674] Remove debug printf() from traffic_top. + + *) [TS-2652] atscppapi: Providing a cancel() in asyncproviders . + + *) [TS-2667] atscppapi: Allow intercept plugins to access request headers + object. + + *) [TS-2672] TSMimeHdrFieldDestroy should not assert on removed fields. + + *) [TS-2554] New plugin: background_fetch, which under certain conditions + will kick off a background fetch when it detects Range request and + responses. This allows for the cache to start populating objects that would + otherwise not be cacheable. This is ideally used together with the + read-while-writer feature as well. + + *) [TS-2671] Restore missing .useflt remap directive. + + *) [TS-2654] Crash in Range requests with read-while-writer. + + *) [TS-2665] Clean up ink_stack_trace_dump code + Thanks to Alexey Ivanov for reporting this issue. + + *) [TS-2663] Wrong condition on log code for server connection errors. + Author: David Binderman + + *) [TS-2664] atscppapi: Removing initializable values. + + *) [TS-2660] Rename StateMachineAction_t values for legibility.
+ + *) [TS-2662] Re-enable KEEP_ALIVE_POST_OUT by default. + + *) [TS-2661] Remove unused HttpSM::decided_cached_url. + + *) [TS-2658] Additional debug logging for SSL certificates. + + *) [TS-2431] Migrate Taobao SPDY plugin to ATS core. + + *) [TS-2651] atscppapi: race conditions in destruction of async providers + + *) [TS-2646] regex_remap: Add a new option, @caseless. + + *) [TS-2647] atscppapi: Bug fixes in headers and atscppapi. + + *) [TS-2598] Expose HttpDebugNames to public plugin APIs. + + *) [TS-2639] Release HttpClientSession objects back to the proxy allocator. + + *) [TS-2637] Add traffic_line records match option. + + *) [TS-2630] Add lib/ts/apidefs.h to place common types. + + *) [TS-2610] Add "client_protocol_stack"(%) field into LogFormat. + + *) [TS-2612] Indroduce TSHttpConnectWithProtoStack() API. + + *) [TS-1062] Extends and optimizes FetchSM. + + *) [TS-2631] Header_rewrite should support changing destination in non-remap + case. + + *) [TS-2623] Remove the limit on the number of cachurl regular expressions. + + *) [TS-2628] traffic_line should tell you when a reload is needed. + + *) [TS-2627] Reduce management socket code duplication. + + *) [TS-2625] trafficserver.in doesn't use TS_BASE. + + *) [TS-2624] Make thread affinity more robust. + + *) [TS-2620] Make the stats_over_http plugin publish node and plugin stats. + + *) [TS-2614] Response to invalid Content-Length for POST should be a 400 + error. Author: Ron Barber + + *) [TS-2615] Better logging and error handling in SSL client session startup. + + *) [TS-2613] Can't turn on attach server session to client from + records.config. + + *) [TS-2611] Add a new S3 authentication plugin, s3_auth. This only supports + the v2 features of the S3 API. + + *) [TS-2522] Better hook management for header_rewrite plugin, and some + cleanup. 
+ + *) [TS-2169] Add SSL statistics + Author: Ron Barber + + *) [TS-2607] Fix TS_USE_HWLOC #define in build process + + *) [TS-2595] DNS lookup failed with multiple search domains. + Author: Thach Tran + + *) [TS-2019] Fix the "vector inconsistency" errors. + + *) [TS-2585] Add overridable configs support to regex_remap plugin. + Author: Ethan Lai + + *) [TS-2569] Set the default SSL options correctly. + Author: Ron Barber + + *) [TS-2599] Remove dead code in RedCore related to Record Types + + *) [TS-2593] HTTPS to origin fails on CentOS6.x. This is a regression of + sort from TS-2355. + + *) [TS-2576] Add Oct/Hex escape representation into LogFormat + + *) [TS-2494] fix the crash that return the stale cached document + when os is down, even if it`s status is not 200 (ok). + + *) [TS-2590] Translate documentation into Japanese. + Authors: Masaori Koshiba + Masakazu Kitajo + syucream + + *) [TS-2586] Improvements to the internal implementation of overridable + configuration lookups (_conf_to_memberp() ). + Author: Yu Qing + + *) [TS-2210] Add a plugin API to manipulate the session's SSL context. + Author: Kang Li + + *) [TS-2542] Turn on caching zero length responses by default. + + *) [TS-2405] Change the default for proxy.config.net.sock_option_flag_out to + 1, which enables TCP_NODELAY. + + *) [TS-2531] The default remap rule doesn't match a forward proxy request + + *) [TS-612] Add support for SSL keys with passphrases. + Author: Ron Barber + + *) [TS-2319] Change default behavior for the various ignore-mismatch + configurations. + + *) [TS-2563] Always set the SSL default verify paths. + Author: Wei Sun + + *) [TS-2437] Add a lifecycle hook to expose loaded SSL certificates to + plugins. Author: Wei Sun + + *) [TS-2582] Make traffic_cop debugging easier by logging to stdout. + + *) [TS-2579] Remove ipv4 limit for FetchSM and TSFetchUrl/TSFetchPages. + + *) [TS-1893] Add more options to server session control.
+ + *) [TS-2239] Initial ALPN TLS extension support. + + *) [TS-2568] Better template messages. + + *) [TS-2567] Add --install-test-tools configure option. + + *) [TS-1622] Add a new API, TSHttpTxnIsCacheable(), which allows a plugin + to determine whether a request (and/or response) would be cacheable. + + *) [TS-2560] regex_remap lowercase substitutions is not being initialized in + the constructor + + *) [TS-2559] Disconnect clients on unrecoverable origin errors. + + *) [TS-2556] Fix ink_hrtime_diff_msec. + + *) [TS-2553] Fix a segfault in the Metalink plugin reported by Faysal Banna + and preserve the Content-Length header + Author: Jack Bates + + *) [TS-2195] Remove the (deprecated) TSHttpTxnCacheLookupSkip API. + + *) [TS-2229] Deprecate the header_filter plugin, use header_rewrite instead. + + *) [TS-2290] Remove X-ID special log tag, and cleanup HdrToken confusion. + + *) [TS-2088] Change TSRecordType enum values to powers of two + + +Changes with Apache Traffic Server 4.2.0 + + *) [TS-2552] configure fails to detect missing the #define for + SSL_CTX_set_tlsext_ticket_key_cb(). + + *) [TS-2549] printf() compiler warnings on OSX (clang) with the CPP APIs. + + *) [TS-2532] Fix make distclean for C++ API examples. + + *) [TS-2306] Client connection hang while downloading big file from origin + server over SSL connection + + *) [TS-2353] Add ability to load ssl certs that are owned by root and only + read only by the user + + *) [TS-2551] Eliminate the tr1 dependency from CPP APIs. + + *) [TS-2541] Add WebSocket support + + *) [TS-2550] Add inline configuration overrides to the conf_remap plugin. + + *) [TS-2546] Move xptr and _xstrdup to ink_memory.{h,cc}. + + *) [TS-2180] Small memory leak in RecCore.cc, from previous refactoring. + + *) [TS-2534] Make sure RecRecord structs are always properly initialized.
+ + *) [TS-2483] Add a new metric, proxy.node.restarts.proxy.cache_ready_time, + tracking absolute time when the cache started (finished + initialization). Until cache is available, or no cache configured, this + metric stays at a value of "0". + + *) [TS-2517 First implementation of traffic_shell.pl, and reorg the modules + a bit for more consistency. Also fixes a timeout issue in AdminClient.pm. + + *) [TS-1467] Disable client initiated renegotiation (SSL) DDoS by default + + *) [TS-2538] Cleanup of ProcessMutex (unused) and InkMutex (dupe of + ink_mutex). We now use ink_mutex consistently. + + *) [TS-2544] conf_remap plugin: allow for multiple configuration files. + + *) [TS-2530] Check for loopback interfaces when computing the local address. + Author: Ron Barber rbarber@yahoo-inc.com + + *) [TS-2031] Prevent duplicate SSL SNI name registration. + Author: Feifei Cai + + *) [TS-2501] Refactor and improve performance for the case without + expansions. Review: Alexey Ivanov . + + *) [TS-2533 ] Add three commands previously provided by traffic_shell. + + *) [TS-2304] Make the healthcheck plugin watch for file permission changes. + + *) [TS-2519] Make build version metrics non-persistent. + + *) [TS-1606] Log buffers are not flushed periodically when TS is launched + with NO_REMOTE_MANAGEMENT flag + + *) [TS-2481] Incorrect origin server port used sometimes (with keep-alive). + Author: Dimitry Andric + + *) [TS-2526] Remove the g_stats_snap_fpath global variable. + + *) [TS-2525] Remove restrictions on outbound transparency with SSL. + + *) [TS-2425] Update to TS-2261 for loading plugins as root + + *) [TS-2505] Add traffic_line --offline option. + + *) [TS-2305] Fall back to ftruncate if posix_fallocate fails. + + *) [TS-2504] Support OpenSSL installations that use the lib64 directory. + + *) [TS-799] Have AdminClient.pm created from .in file. + + *) [TS-2509] Add the const qualifier to pure HttpTunnel member functions. 
+ + *) [TS-2508] Add a *highly* experimental escalation plugin. + + *) [TS-2507] Fix the state transition logging in HttpSM::handle_server_setup_error. + + *) [TS-1648] Segmentation fault in dir_clear_range() + + *) [TS-2500] Fix handling of cache stripe assignment when a disk is + taken offline. + + *) [TS-2499] Fix the header_rewrite plugin expansions of the new %<> + strings. Author: Alexey Ivanov . + + *) [TS-2498] Add a build option to install the example plugins. + + *) [TS-2484] Add API support for the two missing overridable APIs: + proxy.config.http.cache.max_open_read_retries + proxy.config.http.cache.open_read_retry_time + + *) [TS-2497] Failed post results in tunnel buffers being returned to + freelist prematurely. + Reporter: Thomas Jackson + + *) [TS-1668] Added HSTS configuration options to ATS + + *) [TS-2495] Reduce the size of HttpVCTableEntry. + + *) [TS-2491] stop other esi plugin unit test programs after error. + Author: Yu Qing + + *) [TS-1821] make the AIO test pass when build with native aio. + + *) [TS-2412] fix the bug of restarting ATS causes complete cache data + loss when use linux-native-aio. + + *) [TS-2488] enhance esi plugin to allow any of the space characters to + follow esi starting tags. + Author: Yu Qing + + *) [TS-2489] Fix esi plugin problem with contents in comments output + twice when the node list is cached. + Author: Yu Qing + + *) [TS-2336] First attempt at moving the Prefetch APIs into some usable + state. There's still work to be done here, but separate bugs for that. + + *) [TS-2487] Export PUSH HTTP method constants to the TS API. + + *) [TS-2486] Eliminate SIMPLE_MEMCPY_INIT define. + + *) [TS-2476] Fix size_t format string. + Author: Radim Kolar + + *) [TS-2471] Writing records.config can fail when the disk is full. + Author: Yu Qing + + *) [TS-2479] Don't output orphan log after failover sucessfully. + + *) [TS-2370] SSL proxy.config.ssl.server.honor_cipher_order is backwards. 
+ Changed the default setting and changed the meaning of it in the code. + + *) [TS-2466] NOT increase the version of records.config when changing + the local parameter + Author: Yu Qing + + *) [TS-2469] remove libreadline which is gpl licensed. + Author: Ben Aitchison + + *) [TS-2235] url_print should NOT output "?" for empty query string, + fix two remains. + Author: Yu Qing + + *) [TS-2475] Adding new transaction methods in C++ API + + *) [TS-2474] Change proxy.config.net.poll_timeout to 10ms consistently. + + *) [TS-2473] Fix C++ API includes for FreeBSD. + Author: Radim Kolar + + *) [TS-2467] traffic_shell doesn't work with tcl 8.6. + Author: Ben Aitchison + + *) [TS-1365] Add a new configuration option, proxy.config.net.poll_timeout, + with the same behavior as the command line option --poll_timeout. Also + adjust AIO scheduling to correlate to this setting, to avoid additional + CPU load. Note that this configuration is generally not necessary to + configure, unless you are concerned with system idle CPU consumption. + + *) [TS-2468] Bring back the load balancer plugin. + + *) [TS-2465] libxml2 detection generates an invalid linker path. + Author: Radim Kolar + + *) [TS-2271] Threaded plugin support with 3rd party libraries. + Author: Heikki Hannikainen + + *) [TS-2464] Remove useless and buggy connection header handling + + *) [TS-2457] Protocol.c: change usage of atoi to strtol. + Author: Radim Kolar + + *) [TS-2459] Fix wrong name for a couple of librecord APIs. + Author: Yu Qing + + *) [TS-2463] Crash regression around slow-log feature, when logging an + event. This fixes commit c290ce0df2a. + + *) [TS-32] Fix ICP. Author: Gota Adachi + + *) [TS-2248] Segmentation fault in HttpTunnel with flow control. + Author: bettydramit + + *) [TS-2454] Fix undefined reference to `__sync_fetch_and_sub_8' on ARM + 32bit system. + + *) [TS-2450] Fix assertion failure for T61String type. + + *) [TS-2117] make hipes plugin build. 
+ + *) [TS-2452] Can't access a deleted object. + + *) [TS-2363] Fix assertion of "Unknown file format type!". + + *) [TS-2448] Fix traffic_cop and traffic_manager to obey the + proxy.config.local_state_dir setting. + + *) [TS-2445] Fix problem with 204 responses closing POST requests. + + *) [TS-2434] Use the FATAL error level to handle plugin errors. + + *) [TS-2203] Clarify syslog startup messages for standalone log programs. + + *) [TS-2436] Add a simple integration test harness. + + *) [TS-2355] ATS 4.0.x crashes when using OpenSSL 1.0.1e. + + *) [TS-2432] Fix a race in aio_err_callblk. + + *) [TS-2251] Simplify LogBuffer reference counting. + + *) [TS-2190] Remove cache.log from the cachurl plugin. + + *) [TS-2426] Add a new plugin, xdebug, for cache debugging using HTTP + headers. + + *) [TS-2077] Remove pipeline configurations, they were no-op's anyways. We + still support pipelining (we always do), it's just not treated specially + (or optimized). + + *) [TS-2386] clean up unused files and codes -- round 4.2. + + *) [TS-548] remove Initialize.cc Initialize.h + + *) [TS-2082] remove STANDALONE_IOCORE FIXME_NONMODULAR and NON_MODULAR + defines. + + *) [TS-312] Add option to always share keep-alive connections to the origin + server. + + *) [TS-2419] Don't close client connection when responding with a 204 and + there is no body. + + *) [TS-1146] Add RFC 5077 TLS session ticket support. + Author: Wei Sun + + *) [TS-2401] Use Layout instead of global install path directories. + + *) [TS-2420] Remove STAT_SYNC, CONF_SYNC, and REM_SYNC threads and schedule + those continuations in ET_TASK. + + *) [TS-2372] Enable TLS perfect forward security with ECDHE. + + *) [TS-2416] Make TLS the session timeout threshold configurable. + Author: Wei Sun + + *) [TS-2335] adding ts_lua plugin to experimental directory. + + *) [TS-2347] buffer_upload uses unsafe function tempnam(). Replace it + with mkstemp(). 
+ + *) [TS-1815] Add thread number and port to accept thread name and + add the file descriptor number to the ET_AIO thread names. + + *) [TS-2415] Use standard continuations to release UrlRewrite objects. + + *) [TS-2413] Release memory for idle SSL connections. + Author: Wei Sun + + *) [TS-2365] Configure the maximum TLS record size. + Author: Wei Sun + + *) [TS-2351] Bandaid fix for Range request crash related to + Read-While-Writer and content length calculations. + + *) [TS-2408] Fix double free of proxy.config.admin.user_id. + + *) [TS-2396] UrlRewrite.cc does not free the queue correctly. + Author: Yu Qing + + *) [TS-2382] Partial fix for make install creating man files as root. + + *) [TS-2381] Reorganize TSUrlCreate docs into separate files. + + *) [TS-2377] Install man pages as part of the build. + + *) [TS-2379] Add a new field: '%', "client_host_port" to LogFormat. + + *) [TS-2374] Abort the producer if the none of it`s consumers is alive. + + *) [TS-2330] Update proxy.config.body_factory.enable_customizations comments + in records.config. + + *) [TS-2327] TSRedirectUrlSet does not perform DNS lookup of redirected OS. + + *) [TS-1468] Check vary and accept headers on non-200 responses in cache. + + *) [TS-2352] refine THREAD_ALLOC feature. + + *) [TS-2364] Introduce slice notation to field syntax in log format. + + *) [TS-2360] Fix usage of TSMimeHdrFieldValueStringGet() IDX in some plugins. + + *) [TS-2361] Load regex_remap configuration relative to the configuration + directory. + + *) [TS-2359] Make install over existing installation can fail. + + *) [TS-2350] Enhancements to traffic_top. + + *) [TS-2348] Rename tstop to traffic_top. + + *) [TS-2384] Fix regression in key-lookup code between 4.0.x and 4.1.x. + + *) [TS-2340] Fix TextLogObject log rolling. + Author: bettydramit + + *) [TS-2343] Remove the --schema option from Traffic Manager, and the code + around it. 
+ + *) [TS-2316] header_rewrite: numerous improvements: cookie based conditions, + rule counters, improved documentation. + Author: Alexey Ivanov + + *) [TS-2338] Remove IPRange.cc and .h, and SocksParser.cc. + + *) [TS-2333] Change the SAX callbacks to not clash with libxml2, which broke + synthetic metric completely. + + *) [TS-2339] Cleanup Makefile.am, fixing missing / wrong _SOURCES entries. + + *) [TS-2341] Cast TSHttpStatus to int to suppress compiler warning in clang. + + *) [TS-2303] Incorrect docs for negative_caching_enabled. + Author: Thomas Jackson + + *) [TS-2712] Explicitly use subdir-objects in automake init. + + *) [TS-2309] Allow mod_generator plugin for lighttpd to accept "SI" postfixes. + + +Changes with Apache Traffic Server 4.1.0 + + *) [TS-2252] Fix bison version check on Ubuntu. + + *) [TS-2108] Fix TSConfig to build with bison 3.0 + + *) [TS-2311] ESI: Support responses that are of other text content type as + well as non-200 status response. Author: Kit Chan + + *) [TS-2321] C++ API: Clean up header code to not use STL containers and use + structures directly. + + *) [TS-2323] Implement a .include directive for remap.config. + + *) [TS-2322] Set PCRE malloc hooks globally. + + *) [TS-1955] Range: requests during read-while-writer gets the wrong + Content-Length. + + *) [TS-2245] This adds a '2' config state to the ignore mismatch configs. + + *) [TS-2178] Force keep alive off if re-using client 4-tuple and server is + not keep alive. + + *) [TS-2264] Fixed problem with EADDR_NOTAVAIL handling. + + *) [TS-2317] Read/write mutex of PluginVC may be held without release. + Author: portl4t.cn@gmail.com + + *) [TS-2315] ESI Plugin: fetcher does not handle error gracefully. + Author: Kit Chan + + *) [TS-2115] buffer_upload hard-codes "nobody" user/group. + Author: Kit Chan + + *) [TS-2008] Cache control with multiple suffixes. + Author: bettydramit + + *) [TS-2247] ua close time in milestone may not set. 
+ Author: Gang Li + + *) [TS-2302] Log collation causes error logging to stop. + + *) [TS-2200] TS_HRTIME_xxx in experimental.h is invalid to use. + Author: Yu Qing + + *) [TS-2235] url_print should NOT output "?" for empty query string. + Author: Yu Qing + + *) [TS-2292] the version of records.config increases unexpectly. + Author: Yu Qing + + *) [TS-2277] cluster alarm messages broadcast infinitely. + Author: Yu Qing + + *) [TS-2276] manager memory leak in some case. + Author: Yu Qing + + *) [TS-1638] less strict on kernel version checking in clustering. + Author: Yu Qing + + *) [TS-2265] Remove unused log tags for prob, prcb and cgid. + + *) [TS-2301] Replace the CACHE_READY macro with CacheProcessor::IsCacheReady. + + *) [TS-2300] Remove the HIT_EVACUATE build option. + + *) [TS-2227] Allow for multiple config files for a header_rewrite plugin + invocation (be it in remap.config or plugin.config). + + *) [TS-2230] header_rewrite should support the same hook-management that + header_filter does for remap rules. This allows per-remap rules that + triggers in hooks other than the remap phase. + + *) [TS-2228] Add a set-config operator for header_rewrite plugin. + + *) [TS-2226] Add a set-header operator for header_rewrite plugin. + + *) [TS-2296] improve ConfigProcessor reference counting. + + *) [ TS-2295] update statvfs usage. + + *) [TS-2139] Fix PURGE twice to remove object in cache if + enable-interim-cache. + + *) [TS-2216] Fix cquuh log tag, which does not calculate the right + string length. + + *) [TS-2275] fix interim cache lossing data if the server process crash. + Author: Gang Li + + *) [TS-2291] Add remap_stats plugin to experimental. + + *) [TS-2242] Update core plugins' support_email and vendor_name for + consistency. + + *) [TS-1988] cquuc and cquup log tags can have no values. + + *) [TS-2159] Force first log rotation at proxy.config.log.rolling_size_mb. 
+ + *) [TS-2138] Fix the bug that restarting ats cause cache data loss if + enable the native-aio. + + *) [TS-2197] Use HttpSM::main_handler to handle the client request stuff. + + *) [TS-2254] On ARM arch, ink_atomic_increment returns wrong value. + Author: Yu Qing + + *) [TS-2269] regex_remap plugin does not deal with empty path's properly. + Author: Kit Chan + + *) [TS-2270] ESI Plugin can have infinite loop while doing gunzip on + responses. Author: Kit Chan + + *) [TS-2268] Add support for opening protocol traffic sockets through the + traffic_manager. Added TSPluginDescriptorAccept into expiremental API. + + *) [TS-2266] Add a "make rat" Makefile target, to create a RAT report. This + is used for verifying licensing compliance on the entire source tree. + + *) [TS-2212] Implement the log tag for HTTP requests. This also does + a small refactoring of the "atoi" functions in lib/ts, such that we now + consistently have both prototypes for all types (with and without a + string length parameter). + + *) [TS-2261] Add config option to restore/elevate access to reading files by + root when loading plugins. + + *) [TS-2257] Healthcheck plugin can stop watching some events. + + *) [TS-2260] Avoid flooding log when log host is down. + + *) [TS-2259] Introduce failover hosts for logging system. + + *) [TS-2256] Mem stats info is bad when enable reclaimable-freelist. + + *) [TS-2255] TS should not flood printing after log space reach the limit. + + *) [TS-2245] cancel the trigger of CacheVC in openWriteCloseDataDone. + + *) [TS-2232] log level should be change from Status to Debug. + Author: Yu Qing + + *) [TS-2201] split drainIncomingChannel two thread, one handle Broadcast + message and other handle Reliable(TCP) request for supporing large cluster. + + *) [TS-2144] Avoid race on e.g. "traffic_server -Cclear" which would crash + the process intermittently. + + *) [TS-2217] remove the option to turn off body factory - setting it to 0 + will result in empty responses. 
+ + *) [TS-2209] add support for lowercasing all substitutions in regex_remap. + + *) [TS-2187] failed assert `nr == sizeof(uint64_t)` in EventNotify::signal(). + + *) [TS-2206] The trafficserver RC script does not use absolute path to + traffic_line binary. + + *) [TS-2207] CentOS5 out of tree perl module build fails. + + *) [TS-1637] Fix nodes as idle/dead if we have not heard from them in + awhile. + + *) [TS-2185] Support to control ClusterCom::sendSharedData frequency. + + *) [TS-2195] Deprecate experimental TSHttpTxnCacheLookupSkip API. + + *) [TS-2176] Do not reset value of api_skip_cache_lookup when reading it. + Author: Corey Cossentino + + *) [TS-2191] Do not reschedule http_sm when the sm_list`s lock is not + acquired. This can lose items for the WebUI, which will be fixed in a + separate bug. + + *) [TS-2188] Fixes to make healthcheck plugin not segfault, and parse the + log files properly. Author: Scott Harris + + *) [TS-1086] Avoid edge case returning 304 to an unconditional request. + Diagnosis and patch by Mohamad Khateeb. + + *) [TS-2168] Make RecordsConfig.cc more inline with default builds. + + *) [TS-2174] traffic_shell/traffic_line miss some stats value + + *) [TS-2173] RECD_COUNTER type is missing in setTokenValue(). + + *) [TS-2165] Introduce cluster-wide logging stats. + + *) [TS-2167] Update apichecker.pl to give suggestions on additional + deprecated APIs. + + *) [TS-2163] Remove WDA_BILLING, ACC_ALARMS etc. code and definitions. + + *) [TS-2156] Fix stats trap in different type of threads. + + *) [TS-2160] Remove ats_is_ip_nonroutable and replace it with the less + confusing ats_is_ip_linklocal and ats_is_ip_private. + + *) [TS-2158] Properly mark an IP as non-routable for IPv4 and IPv6 link + local addresses as well as IPv6 private addresses and IPv4 Carrier Grade + NAT addresses from RFC 6598. + + *) [TS-2155] Make a new RAT exclude file that uses the regular expressions + now supported. 
Also cleanup some minor licensing discrepancies. + + *) [TS-2148] handle_cache_operation_on_forward_server_response ignores value + of api_server_response_no_store. + Author: Corey Cossentino + + *) [TS-2147] Set server_share_sessions to 1 for 'internal' transactions in + rfc5861 plugin. + + *) [TS-2114] buffer_upload plugin defines true and false + Author: Kit Chan + + *) [TS-2116] buffer_upload plugin is in the source tree but does not build + Author: Kit Chan + + *) [TS-2094] eliminate xcrun warnings at configure time + + *) [TS-2141] Inconsistent euid causes bad mgmtapi/eventapi sockets. + + *) [TS-2137] Use eventfd instead of pthread signal/wait in ATS. + + *) [TS-287] Fix transaction_active_timeout_in does not trigger on the first + request of a Keep-Alive connection. Author: Li Gang + + *) [TS-2136] Fix the first proxy.config.http.accept_no_activity_timeout is + invalid. + + *) [TS-2089] Introduce configurable collation preproc threads. + + *) [TS-2126] Avoid unnecessary memory copy in LogHost::write() + + *) [TS-2096] Improve SSL certificate loading error messages. + + *) [TS-2122] Enlarge the 64KB limitation of log buffer size. + + *) [TS-2061] LogFile::write_ascii_logbuffer3() can silently drop log + entries. + + *) [TS-2123] Remove useless max_entries_per_buffer option. + + +Changes with Apache Traffic Server 4.0.1 + + *) [TS-2161] TSHttpTxnHookAdd memory leak. + Author: bettydramit + + *) [TS-2154] Lua plugin asserts traffic_server on startup. + + *) [TS-2127] Move hostdb.config to var/trafficserver, together with the + host.db itself. + + *) [TS-1823] remap.config line continuation support. + Author: Jim Riggs + + *) [TS-1597] Document remap.config filters. + Author: Jim Riggs + + *) [TS-2132, TS-2131] ${libexecdir} and ${localstatedir} + needlessly chowned to ATS' user. + Author: Tomasz Kuzemko + + *) [TS-2130] pthread_setname_np() detection fails on various platforms. + + *) [TS-2129] Check for existence of ExtUtils::MakeMaker. 
+ + *) [TS-2128] Don't link libGeoIP.so.1 into binaries. + + *) [TS-2112] Make libloader compile by default. + + *) [TS-2111] configure check in boost falsely requires 1.50. Reducing it to + v1.33, which is what RHEL5 ships with. + + +Changes with Apache Traffic Server 3.3.5 + + + *) [TS-2051] Fix SSL crash due to excess READ_COMPLETE events. + + *) [TS-2099] Using wrong member when setting active timeout. + + *) [TS-2102] SPDY plugin tries to setup protocol handler too early. + + *) [TS-1953] remove version checks from plugins that don't use it and + add an example showing off the version API and partial compilation. + + *) [TS-2100] Initialize the SSL/NPN registration mutex. + + *) [TS-1987, TS-2097] Remove duplicate and unused string functions. + + *) [TS-2091] Return an error from RecGetRecordOrderAndId if the stat isn't + registered. + + *) [TS-2081] Make the WCCP addr configuration LOCAL. + + *) [TS-2093] Check bounds on plugin stat creation. + + *) [TS-2092] Use of uninitialized member in HdrHeap. + + *) [TS-2052] ET_SSL thread spinning. + Author: Can Selcik + + *) [TS-2090] Make proxy.config.allocator.enable_reclaim default based on + build instructions. + + *) [TS-2086] Remove a few more unused configs. + + *) [TS-1685] Remove TS_MICRO and fellas. + + *) [TS-1255] Add more overridable configurations, and fix bugs in how we + deal with some of these (all "float" configs were completely broken). I + also modified the regression tests to be less easy to fool. + + *) [TS-1976] Prevent an invalid httpport argument being passed from + traffic_manager to traffic_server. + Author: Thach Tran + + *) [TS-2076] Removed proxy.config.http.accept_encoding_filter_enabled, which + is obsolete. + + *) [TS-2075] Cleanup around suboptimal checks on redirects. + + *) [TS-2074] Remove proxy.config.http.server_port and + proxy.config.http.server_other_ports remnants. + + *) [TS-2073] Set the defaults for request_hdr_max_size and + response_hdr_max_size properly. 
+ + *) [TS-2071] Remove proxy.config.http.session_auth_cache_keep_alive_enabled + remnants. + + *) [TS-2072] Remove proxy.config.http.avoid_content_spoofing, and also fix a + logical bug around the tests of when to mark the request header dirty. + + *) [TS-1993] SSL certificate chains are loaded from the wrong directory. + + *) [TS-2059] Remove dead code in EnvBlock.cc and processSpawn(). + + *) [TS-2057] Removed deprecated proxy port configuration values from + records.config. + + *) [TS-1280] Add url match token about cache control rule. + + *) [TS-2064] Fix the authproxy plugin to send an error body. + + *) [TS-2061] Avoid writing outside buffer boundaries in LogFile overfill + buffers (for ASCII logging). Author: quehan + + *) [TS-2041] Allow environment values to override records.config settings. + + *) [TS-2044] Avoid logging client running into DENY status infinitely. + Author: Yunkai Zhang + + *) [TS-2030] Build error with --enable-interim-cache on fedora19 x86_64. + Author: bettydramit + + *) [TS-1943] Rename mcport and rsport parameters to make them more readable. + Author: Yunkai Zhang + + *) [TS-2046] Move perl contribs to lib/perl, and have make / make install + build and install the modules as appropriate. + + *) [TS-954] Better calculation of blocks in volume. Bumped cache DB version. + + *) [TS-2050] Cleanup ink_config.h.in, removing unused symbols. + + *) [TS-1487] [TS-2035] Moved plugin init, added plugin lifecycle hooks, + added delay listen for cache. Removed TS_NO_API defined/build option. + + *) [TS-2047] Schedule RamCacheCLFUSCompressor in RamCacheCLFUS::init instead + of immediately after instantiation. + + *) [TS-2042] Remove remnants of unused vingid command line option. + + *) [TS-1898] improve cluster read/write performance. + + *) [TS-2036] Enable mgmt update (traffic_line -x) for plugins. + + *) [TS-2037] Fix start-stop-daemon typo in debian init script. + + *) [TS-2003] Fix RAM cache stats when using cluster. Author: Yunkai Zhang. 
+ + *) [TS-1820] Cleanup UNUSED / INK_UNUSED / RELEASE_UNUSED. This also removes + the entire mgmt/tools directory, and relevant support in traffic_shell for + managing network interfaces. + + *) [TS-2033] Remove EncryptToFile() and mgmt API. + + *) [TS-2027] Initialize the ConnectionCount`s mutex. + + *) [TS-1997] Remove TSHttpTxnCachedUrlSet() API. + + *) [TS-2028] Config parse problems in healthchecks plugin. + + *) [TS-1966] Add new configuration option to control ProxyAllocator size. + + *) [TS-1151] cop may crash due to uninitialized pointer val + + *) [TS-2013] Install the tspush script. + + *) [TS-2012] Use standard C++11 containers in logstats. + + *) [TS-1999] Merge in the healthcheck plugin from GoDaddy. + + *) [TS-1998] Make stats-over-http support configurable URL path. + + *) [TS-1990] Fix core at CacheContinuation::handleDisposeEvent(). + + *) [TS-2010] Build fixes for OS X 10.9 and Xcode 5. + + *) [TS-1957] fix If CacheContinuation timeout, timeout Event will be loop + + *) [TS-2007] Add TSNetConnectTransparent API for transparent net connections + + *) [TS-1991] clang complaint: logical not is only applied to the left hand + side of this comparison. + + *) [TS-1958] Web UI can crash doing a regex lookup. + + *) [TS-1994] Increase default RAM cache size by a magnitude. + + *) [TS-1978] Segfault when trying to set an error from (remap) plugin. This + also fixes a bug where the "default" template was not loaded upon startup. + + *) [TS-1977] Build issues on OSX / clang related to stacksize changes. + + *) [TS-1972] TSNetAcceptNamedProtocol does not receive connections. + + *) [TS-1970] Using ssl_ca_name= in ssl_multicert.config fails. + + *) [TS-1946] Reduce the verbosity of SSL handshake errors. + + *) [TS-1971] Switch jtest over to standard argument parsing. + + *) [TS-1786] Only enable -Werror for development builds. + + *) [TS-1960] Decouple stacksize config from core, this fixes regressions. 
+ + *) [TS-1959] traffic_manager not honoring some records.config settings + necessary for e.g. logging. + + *) [TS-745] support interim caching in storage to enable, use + '--enable-interim-cache' configure option. + + *) [TS-1968] Promote header_rewrite plugin from experimental + + *) [TS-1961] Add tsxs support for querying installation variables. + + *) [TS-1886] Allow users to attempt gcc builds on Mac OS X. + + *) [TS-1934] Make geoip_acl experimental plugin compile. + + *) [TS-1948] Remove obsolete TSPluginLicenseRequired and plugin.db. + + *) [TS-1942] Remove username.cache configs, they are obsolete and long gone. + + *) [TS-1496] Enable per transaction flow control. + + *) [TS-1684] Added more ProxyAllocators for faster and local memory + allocation. Have seen a doubling in performance depending on the benchmark. + Details in the ticket. + + *) [TS-1962] Don't add POST server connections to the shared pool unless POST + keep-alive option is on + + +Changes with Apache Traffic Server 3.3.4 + + *) [TS-1940] HostDB gets reinitialized on startup. + + *) [TS-1941] Make Linux native-AIO build cleanly. + + *) [TS-1938] Remove unnecessary thread_id() wrapper in reclaimable freelist. + Author: Yunkai Zhang + + +Changes with Apache Traffic Server 3.3.3 (never released) + + *) [TS-1936] Make traffic_logstats honor the log-directory as intended. + + *) [TS-1937] Prevent assert on SSL forward proxy connect. + + *) [TS-1907] Using the "ipv6" option for port configuration cancels the + "ssl" option. + + *) [TS-1927] Make ats_base64_decode able to handle the URL variant + + *) [TS-1207] Move cacheurl plugin out of experimental + + *) [TS-1857] On CentOS the Lua plugin is built whether Lua is found or not. + + *) [TS-1492] Prevent net-throttling from locking out the health checks + from traffic_cop. + + *) [TS-1827] Make combo_handler be enabled via remap.config, and + various other cleanup fixes. Author: Conan Wang and Leif. 
+ + *) [TS-1932] Use a more modern tar format so asf-dist can handle + file paths longer that 99 bytes + + *) [TS-1891] Add double-free checking for reclaimable freelist + Author: Yunkai Zhang + + *) [TS-1926] Require Lua v5.1, by checking for lua_getfenv(). This is + necessary since we are incompatible with Lua v5.2 (for now). + + *) [TS-1921] Fix reclaimable freelist getting stuck in infinite loop. + Author: Yunkai Zhang + + *) [TS-1824] TSHttpTxnPushedRespHdrBytesGet() takes an int argument in the + implementation, whereas the prototype does not have this. + + *) [TS-1928] False warning when setting log rotation size to exactly 10. + + *) [TS-1812] Remove obsolete syslog_thr_init calls. + + *) [TS-1825] TSPortDescriptorAccept does not use it's argument. + + *) [TS-1925] Remove obsolete MMH hash API. + + *) [TS-1924] Add metalink plugin documentation. + Author: Jack Bates + + *) [TS-1913] Fix memory issue caused by resolve_logfield_string() + Author: Yunkai Zhang + + *) [TS-1915] header_rewrite uses TSUrlHostSet() when using set-destination + PATH. Author: Nick Berry. + + *) [TS-1941] Configured socket buffer sizes are not applied. + + *) [TS-1912] SSL hangs after origin handshake. + + *) [TS-1911] Enforce MIOBufferAccessor accessor API. + + *) [TS-1909] Remove duplicate listen socket conditioning. + + *) [TS-1896] Cleanup of unused defines, sometimes duplicated. + + *) [TS-374] Reduce lock contention in HostDB. + + *) [TS-1728] Assign storage entries to volumes + Author: Justin Laue + + *) [TS-1899] strtod() does not honor Hex strings on Solaris. + + *) [TS-1894] madvise() not found on Solaris. And some cleanup. + + *) [TS-1765] Make jtest build more like the rest of the code. + + *) [TS-1892] Move msync APIs to ink_memory.h. + + *) [TS-1890] Authproxy plugin caching and reliability fixes. + + *) [TS-1868] TSREMAP_*_STOP does not stop remap plugin evaluation chain. + + *) [TS-1889] Refactor remap plugin request URL handling. 
+ + *) [TS-1887] Make diagnostic location logging more succinct. + + *) [TS-1884] Remove deprecated IPv4-only NetProcessor API. + + *) [TS-1444] [TS-1881] URL lookup with regex through the web interface was broken. + + *) [TS-1880] Use pthread_setname_np to set the thread name on multiple platforms. + + *) [TS-1879] Make the Ptr<> conversion constructor explicit. + + *) [TS-1768] Prefer AC_SEARCH_LIBS to AC_CHECK_LIB to avoid unnecessary linking. + + *) [TS-1877] Fix multiple Lua remap plugin instance creation. + + *) [TS-1770] Unable to create remap rule for SSL sites when accessed as a + forward proxy. Author: Mark Harrison. + + *) [TS-1867] The combo_handler plugin crashes when receiving non-200 responses. + Author: Conan Wang + + *) [TS-1838] Improve configure.ac to recognize compiler setup better. + + *) [TS-1865] Support DESTDIR in tsxs. + + *) [TS-1864] Illumos / OmniOS needs -m64 with gcc to compile properly on + 64-bit platforms. We also only support ATS on 64-bit Illumos. + + *) [TS-1811] Make the HostDB sizes variable on the SRV enabled config. + This restores compatibility with HostDB's prior to v3.3.1. + + *) [TS-1843] Detect and link libhwloc on Ubuntu. + + *) [TS-1858] Fix redefinition of timersub on Solaris. + + *) [TS-1861] Build fails with reclaimable freelist enabled. + + *) [TS-1860] cacheurl doesn't compile on platforms with pcre/pcre.h + (e.g. solaris). + + *) [TS-1729] Fix channel_stats to compile on various Unixen. Note that this + is still IPV4 only. + + *) [TS-1839] We fail "make test" on some platforms, due to install + directory missing. + + *) [TS-1856] Replace SIZE() macro with COUNTOF() and countof(). + + *) [TS-1853] Fix formatting in logstats to be consistenly 2 decimals. + + *) [TS-1852] Fix missing AC_PROG_SED on CentOS 5.9. + + *) [TS-1717] Fix the static build as much as possible. + + *) [TS-1851] Turn HostDBInfo back into a POD type. + + *) [TS-1850] Improve SSL certificate error reporting. 
+ + *) [TS-1848] Fix MIMEHdr::field_value_set_int64() wrapper. + Author: Yunkai Zhang + + *) [TS-986] experimental.h has a dependency on netinet/net.h (for + struct in_addr). + + *) [TS-1794] Replace all usage of ts_debug_assert() with ts_assert(). + + *) [TS-1834] Force absolute paths to fix logstats unit tests. + + *) [TS-1817] Use the libaio interface to the Linux AIO system calls. + + *) [TS-1721] Integrate tstop into the autotools build. + + *) [TS-1706] Fix documentation for Config::Records.pm. + + *) [TS-1218] Make traffic_cop tracing configurable at build time. + + *) [TS-1846] Remove TestHook. + + *) [TS-1844] Efficient HostDB file allocation. + + *) [TS-1586] Fix the SPDY plugin build under clang on Linux. + + *) [TS-1053] Make combo_handler compile. Author: Conan Wang. + + *) [TS-1792] Cleanup extremely verbose debug text on Vary headers. + + *) [TS-1819] Guarantee hwloc initialization. + + *) [TS-1830] Add getpagesize library function. + + *) [TS-1801] Remove proxy.config.net.throttle_enabled, it's an artifact + of a debugging case, and should not have been included upstream. + + *) [TS-1829] Values exceed capacity of 'long' data type on 32-bit + + *) [TS-1755] Add basic logstats tests. + + *) [TS-1184] Additional whitespace in proxy.config.admin.user_id value results + in error. + + *) [TS-1662] Remove remaining use of register storage class. + + *) [TS-846] Eliminate proxy.config.remap.use_remap_processor. + + *) [TS-1802] Remove proxy.config.net.accept_throttle. + + *) [TS-1752] Change type of "len" in jtest to off_t, for better + compiler compliance. + + *) [TS-1826] Remove DumpStats/http_dump dead code. + + +Changes with Apache Traffic Server 3.3.2 + + *) [TS-621] Allow caching of empty docs (currently only if a header + Content-Length: 0 is in the response). New config option is named + proxy.config.http.cache.allow_empty_doc, and is disabled by default. + + *) [TS-1778] Remove vestigial extensions.config support. 
+ + *) [TS-1806] bogus buffer sizing in CfgContextUtils.cc. + + *) [TS-1805] Fix stats ExpressionEval in stats xml. + Author: Yunkai Zhang + + *) [TS-1783] Eliminate the wpad.dat configuration option (it's unused). + + *) [TS-1787] Eliminate old ink_time code (was probably only used by the + now obsolete Web UI). This fixes compiling with gcc 4.8 as well. + + *) [TS-1067] Remove unused config (and code) for bandwidth management. + + *) [TS-1736] Fatal() terminates process without a backtrace. + Author: Yunkai Zhang + + *) [TS-1791] remove m_mutex acquire&release to avoid deadlock in + ~LogBufferList(). Author: Gang Li. + + *) [TS-1713] SRV support refined. The srv option can now be enabled with + no crash. Be careful, the hostdb.storage_size or hostdb.size need checking. + + *) [TS-1632] In addition to the changes from TS-1674, also add + some safety measures assuring that the stats sums don't go + negative. Author: Yakov Kopel. + + *) [TS-1789] Script to compare RecordsConfig.cc default values with + records.config.default.in. Author: Mark Harrison. + + *) [TS-1631] Mgmt API to clear stats does not actually clear it. + Author: Yakov Kopel. + + *) [TS-1772] Remove multiple TS_INLINE defines. + + *) [TS-1771] add http_load to the build. + + *) [TS-1766] integrate AIO test into the autotools test suite. + + *) [TS-1753] Add remap support to the cacheurl plugin. + Author: Mark Harrison + + *) [TS-1790] authproxy should accept 2xx as authorization success. + + *) [TS-1780] Fix Debuntu build with hardening flags. + + *) [TS-1623, TS-1625] Fixes for logging where the host name was not handled + correctly because it was not in the URL. + + *) [TS-1754] Remove unnecessary warnings from stats evaluation. + Author: Yunkai Zhang. + + *) [TS-1169] Eliminate bogus asserts. Credits to Conan Wang. + + *) [TS-1764] Unify MAX/MIN definitions (in ink_defs.h). Also clean + up the checks of gcc prior to v3.x (which we no longer support). 
+ + *) [TS-1724] Add tool to compare records.config files to contrib. + Author: Mark Harrison + + *) [TS-1566] dynamic update for string vars does not work. + Author: Aidan McGurn + + *) [TS-1708] Using tr-pass port option causes requests with large headers to + hang. + + *) [TS-1734] Remove dead code that invokes missing vmap_config tool. + Author: John Kew + + *) [TS-1660] Host field should not has c style terminator. + + *) [TS-1627] Support requests with payload. + + *) [TS-1763] Add Arch Linux config.layout. + Author: Galen Sampson + + *) [TS-1749] Stats cluster values among nodes are not consistent. + Author: Yunkai Zhang + + *) [TS-1761] Improve scheduling and configuration around HostDB disk sync. + + *) [TS-1758] Remove unused overviewPage aggregation functions. + Author: Yunkai Zhang. + + *) [TS-1748] Add jtest to the build. + + *) [TS-1730] Supporting First Byte Flush for ESI plugin. + Author: Shu Kit Chan + + *) [TS-1745] Fix typos. + Author: Benjamin Kerensa + + *) [TS-1671] Remove AlarmListable from overviewRecord. + Author: Yunkai Zhang + + *) [TS-1740] Improve precision of stats values. + Author: Yunkai Zhang + + *) [TS-1742] Freelists to use 64bit version w/ Double Word Compare and Swap. + + *) [TS-1356] Ability to set thread affinity with multiple modes. + + +Changes with Apache Traffic Server 3.3.1 + + *) [TS-1743] Implement our own hash mechanism for traffic_logstats, since + C++11 does not provide a sensical hash. + + *) [TS-1628] In validate_unmapped_url(), pristine_url can be invalid(). + + *) [TS-1714] Fix some build problems for gcc v4.8. + + *) [TS-1626] Remove WUTS proxy code. + Author: Uri Shachar + + *) [TS-1741] Add plugins examples to the build. + + *) [TS-1058] Add TSHttpTxnCloseAfterResponse experimental API. + Author: Yakov Kopel + + *) [TS-1733] Retool tsxs so that it can compile multiple source files + Author: Dale Ghent + + *) [TS-1738] proxy.cluster.cache_total_hits_mem is missing in RecordsConfig. 
+ Author: Yunkai Zhang + + *) [TS-1739] Fix TODO within varFloatFromName() + Author: Yunkai Zhang + + *) [TS-1559] Server side termination not handled properly when a PluginVC + Protocol Plugin is used. Author: Yossi Gottlieb. + + *) [TS-1300] Document the usage of TSUrlStringGet() and derivatives. + Author: Uri Shachar. + + *) [TS-1645] increase the file stat resolution on config files + Author: Yakov Kopel + + *) [TS-1557] update ua_begin_write + Author: Aidan McGurn + + *) [TS-1320] Reading from SSL origin can starve sending data to client. + + *) [TS-1155] POST requests that are chunked encoding hang when going + forward to origin over SSL + + *) [TS-1634] reimplement Lua state management to support reload + + *) [TS-1716] authproxy fails to reserve an argument index in global mode + + *) [TS-1710] esi plugin enhancement such as support forward proxy + Author: Yu Qing + + *) [TS-1707] fix FreeBSD store blocks calculation + Thanks to Ben Aitchison + + *) [TS-1704] null pointer dereference in dns_result + Author: Li-Wen Hsu + + *) [TS-1701] segv if header_rewrite is configured with the InkAPI and uses + PATH or QUERY conditions. Author: John Kew. + + *) [TS-1700] disable static libraries by default. + + *) [TS-1653] prevent the crash that retry dns lookup after timeout. + + *) [TS-1006] memory management, cut down memory usage. + Author: Yunkai Zhang + + *) [TS-1678] Simplify register_record + Author: Yunkai Zhang + + *) [TS-1252] stats summary in cluster not working + Author: Yunkai Zhang + + *) [TS-1679] channel_stats plugin: count 5xx response and clean up code. + Author: Conan Wang + + *) [TS-1676] FetchSM (TSFetchUrl) cannot handle POST bodies > 32kb. + + *) [TS-1675] Adding API method TSHttpTxnClientIncomingPortSet. + + *) [TS-1674] TSStatIntDecrement is broken: the logic is flawed. + + *) [TS-1673] Remap with recv port is using the wrong port. + + *) [TS-1672] Emergency throttling can continue forever. 
+ + *) [TS-1650] Inactivity cop should use TRY_LOCK instead of LOCK to avoid + deadlock. + + *) [TS-1649] Don't use ink_bind if we're not specifying a local port. + + *) [TS-1667] remove unused enum TSIOBufferDataFlags. + + *) [TS-1538] SSL accept performance regression. + + *) [TS-977] RecCore usage cleanup. + + *) [TS-1574] [TS-1577] when read_from_writer, we should not do range + acceleration. Range request can invalidate cached copy if the server + reponse is 304. + + *) [TS-1609] Traffic Cop doesn't wait() for its children. + + *) [TS-1601] HttpServerSession::release don't close ServerSession if + ServerSessionPool locking contention. + + *) [TS-1643] Post requests with no Content-Length header sends default error + response. + + *) [TS-1640] SSL certificate reconfiguration only works once. + + *) [TS-1246] trafficserver script error message (in ubuntu). + + *) [TS-1630] Add Lua API for TSHttpTxnCacheLookupStatusGet. + + *) [TS-1423] Added option to do transparent passthrough on connections that + are not valid HTTP. + + *) [TS-1599] set OpenSSL allocator with CRYPTO_set_mem_functions. + + *) [TS-1624] Remove JEMALLOC_P use, it seems to have been deprecated. + + *) [TS-1500] let ssl_multicert.config specify sslcert per port + + *) [TS-1621] Adopt ConfigUpdateHandler pattern. + Author: Ethan Lai + + *) [TS-1619] simplify ConfigurationProcessor reconfiguration pattern. + + *) [TS-1617] Build cacheurl when experimental plugins are enabled. + Author: Aron Xu. + + *) [TS-1307] [TS-1422] Changed HostDB handling of IPv4 and IPv6. Address + resolution preferences can be configured globally and per HTTP proxy + port. Transparent connections can now fail over even if + use_client_target_addr is set. + + *) [TS-1616] authorization proxy plugin. + + *) [TS-1615] Some spelling errors in source code. Author: Aron Xu. + + *) [TS-1608] IpAllow should use standard config update. + + *) [TS-1580] Mutex leak plugged. + + *) [TS-1596] Added channel_stats plugin to experimental. 
Author: Conan Wang. + + *) [TS-1607] decouple SSL certificate lookup + + *) [TS-1506] % log symbol will crash TS when requesting a SSL url. + Author: Conan Wang. + + *) [TS-1594] ProxyMutexPtr and Ptr are identical. + + *) [TS-1591] gzip plugin should load relative config file. + + *) [TS-1582] C++11 requires space between strings and format specifiers. + Author: Luca Rea + + *) [TS-1491] Browser always prompts for authentication (NTLM). + Author: Yakov Kopel. + + *) [TS-1553] Detect and build C++11 dependencies. + + *) [TS-1576] reload splitdns.config at runtime. + Author: Ethan Lai + + *) [TS-1551] reload ssl_multicert.config at runtime + Author: Ethan Lai + + *) [TS-1572] Plugin response status change can trigger ATS assertion + Author: Uri Shachar + + *) [TS-1433] to make read from writer work + + *) [TS-1564] fix the rolling of Logs created via the API in plugins + Author: Craig Forbes + + *) [TS-1565] TSStringPercentEncode returns one character short in no-op case + (no encoding needed). Author: Thach Tran . + + *) [TS-1561] Plugin esi - Enhancements on ESI plugin. + Author: Kit Chan + Yu Qing + + *) [TS-1560] plugins need memory barriers for ARM. + + *) [TS-1558] use_client_addr breaks control over upstream HTTP protocol + version. + + *) [TS-207] Add raw disk support for FreeBSD. + + *) [TS-1494] sslCa should be set NULL after parseConfigLine in + SSLCertLookup.cc. + + *) [TS-1382] make jtest 64bit nice. + + *) [TS-1550] remove unnecessary USE_CONFIG_PROCESSOR define. + + *) [TS-1549] Drop MakeErrorVA, enabling body_factory by default. + + *) [TS-1548] Update documentation for ip_allow.config. + Author: Nick Berry + + *) [TS-1223] fix the crash in http_ui show network connections. + + *) [TS-1543] Enable non-debug logging for rfc5861 plugin. + + *) [TS-1512] get volume & hosting work with cluster. + + *) [TS-1542] Fix so that rfc5861 plugin will compile against older versions + of TS. 
+ + *) [TS-1446] Make sure age header is enabled in rfc5861 plugin + + *) [TS-1539] Update build package list in README + + *) [TS-1535] FetchSM process_fetch_write should ignore event + TS_EVENT_VCONN_WRITE_READY. + + *) [TS-1534] implement the interim cache for ESI parser result. + + *) [TS-1501] vc`s inactivity_timeout event should be schedule in vc`s + thread. + + *) [TS-1503] make Event::schedule simple and efficient + Author: KuoTai + + *) [TS-1532] make esi plugin support cookie sub keys. + + *) [TS-1526] SNI support breaks IP-based lookup. + + *) [TS-1513] SPDY plugin crashes on connection close. + + *) [TS-1516] use_client_addr breaks parent proxy configuration + Author: Uri Shachar. + + *) [TS-1518] detect header_rewrite plugin boost dependency. + + *) [TS-1003] make Prefetch config file reloadable + + *) [TS-1462] SPDY proxy plugin + + *) [TS-1488] Check the event`s cancel flag before put it into the + PriorityEventQueue. Author: Chen Bin (kuotai). + + *) [TS-1484] Fix SNI crashes where there is no default certificate + + *) [TS-1473] Fix header_filter plugin for ARM. + + *) [TS-1457] Change chunking output to avoid massive memory use by + transforms. + + *) [TS-1469] Manager uses hardcoded FD limit causing restarts forever on + traffic_server. Reported By: Thomas Jackson + + *) [TS-1466] disable ssl compression by default. + + *) [TS-1464] mark ink_assert with the noreturn attribute. + + *) [TS-1458] fix LuaJIT include ordering. + + *) [TS-1454] crash when ic_hostname is null in cluster mode. + + *) [TS-1270] add force local cache control in clustering mode. + + *) [TS-1386] thread hang in cluster type=1, which will trigger the + throttling. + + *) [TS-1351] raw disk cache disabled when system start. + + *) [TS-1452] gzip build failure with Apple/clang-421.0.57. + + *) [TS-1339] Move fragment offset table from First Doc to Alt header so + that fragment offsets are stored per alternate. 
+ + *) [TS-1416] Do not do cache lookup if cop_test_page is true to stop + artificial increase of cache misses. + + *) [TS-1364] Rewrite reverse-proxy URL headers in all cases, not just + a basic Redirect target. + + *) [TS-1440] Lua transaction and session hook support. + + *) [TS-1266] RAM cache stats are wrong with CLFUS. + + *) [TS-1437] Make the Lua plugin compile on RedHat based distros. + + *) [TS-1436] Added Yahoo directory layout for installation. + + *) [TS-1418] Add automake file to Metalink plugin. Author: Jack Bates. + Also reorganize the plugins/experimental Makefile.am stuff slightly. + + *) [TS-1414] gzip plugin enhancements. + + +Changes with Apache Traffic Server 3.3.0 + + *) [TS-1427] PluginVCs now use a method similar to actual socket VCs + for firing inactivity timeout events. + + *) [TS-1426] protection from NULL deref when using + TSHttpTxnOutgoingTransparencySet after a user agent has disconnected. + + *) [TS-1425] clean up the lingering read before deallocating its buffers + when an origin server terminates the connection during a POST request. + + *) [TS-1249] Disable ESI packed nodes by default. + Author: Shu Kit Chan + + *) [TS-1421] Modify the default log configs to avoid logging to stdout + or stderr. This helps with TS-306 (but is not a solution for lack of + log rotation on traffic.out). + + *) [TS-1415] return 400 if the length of request hostname is zero. + + *) [TS-1379] Better error message when mgmt socket is not available. + + *) [TS-1389] Replace TSHttpTxnServerRespNoStore() with + TSHttpTxnServerRespNoStoreSet(), and move it to ts/ts.h. Author: Phil + Sorber. + + *) Removed the (experiemental) TSHttpTxnClientDataGet() API. See + TS-998 for more details. + + *) [TS-1408] Plugin to implement the stale-while-revalidate and + stale-if-error features of RFC5861 + Author: Phil Sorber + + *) [TS-1406] add ESI to experimental plugins build. + + *) [TS-1387] Allow proxy.config.http.insert_age_in_response to be + overridden. 
Author: Phil Sorber + + *) [TS-1392] Fix SNI certificate fallback path. + + *) [TS-1385] generic atomic operations API. + + *) [TS-1380] SSL wildcard lookup doesn't find the longest match. + + *) [TS-1315] Fix URL parsing to handle non-HTTP schemes correctly. + + *) [TS-1322] CONNECT to parent proxy has URL with a trailing slash + Author: Yakov Kopel + + *) [TS-1370] Restore original stale-while-revalidate code for posterity + Author: Phil Sorber + + *) [TS-1340] Improve IPv6 port example in records.config + Author: Jan-Frode Myklebust + + *) [TS-1363] Cert path not working using intermediate certificate. + + *) [TS-895] Added version checks for bison and flex. + + *) [TS-1331] Wrong regex for ip in records config. Authors: + Yakov Kopel and Uri Shachar. + + *) [TS-1348] Remove the active timeout when releasing or + binding server_session. + + *) [TS-1350] Detect and prefer LuaJIT. + + *) [TS-538] Remove deprecated INKStats API. + + *) [TS-1345] fix signed/unsigned compilation issues in Vec. + + *) [TS-1343] Stat system doesn't check buffer sizes. + + *) [TS-1342] Lua plugin initial hook support. + + *) [TS-1314] Remove TS_ARG_MAX usage so that platforms with + unlimited ARG_MAX can build correctly. + + *) [TS-1341] Remove remnants of TSCacheHookAdd() API. + + *) [TS-1328] TSMgmtIntCreate and TSMgmtStringCreate validation. + Author: Yakov Kopel. + + *) [TS-1338] SSL not handling some events properly. + + *) [TS-1258] Need the ability to allow a user to alter the background fill + config values on a per transaction basis. Author: Robert Logue. + + *) [TS-961] Add TSPortDescriptor API to support accepting connections with + inbound transparency. + + *) [TS-1332] Silence spurious error when adding SSL certificates with + alternate names. + + *) [TS-1087] TSHttpTxnOutgoingAddrSet forward declaration does not match + implementation. 
+ + *) [TS-1319] Large cache (> 16TB) not working + Author: Van Doorn, Jan R + + *) Fix a bug that ram cache and evcuation can not work + well in disks larger than 2TB. Author: weijin + + *) [TS-1321] improve RT on Cluster purge missing objects + Author: Bin Chen + + *) [TS-1312] Allow to open cache disk without O_DIRECT, for e.g. tmpfs + "disk" cache. + + *) [TS-1289] stats codes mess up when disk fail + + *) [TS-959] remove ae_ua filter + + *) [TS-1310] Fix a endless loop in CacheVC::removeEvent. + Author: weijin & Hua Cai + + *) [TS-1299] Fix collation in custom logging. + Author: bettydramit + + *) [TS-1306] Fix WCCP build problems on FreeBSD. + + *) [TS-1303] Added '=6' as special case for HTTP port configuration for + backwards compatibility. + + *) [TS-1301] Add a new API, TSHttpTxnMilestoneGet(), which can be used + to retrieve the various internal milestone timers from a plugin. + + *) [TS-1295] Don't assume root privileges during make install. + Author: Jan-Frode Myklebust + + *) [TS-1294] initscript mentions using /etc/sysconfig/trafficserver, but + doesn't use it. Author: Jan-Frode Myklebust + + *) [TS-1293] initscript should provide chkconfig header. + Author: Jan-Frode Myklebust + + *) [TS-1297] Do not link all binaries and plugins with libz and liblzma. + + *) [TS-1296] Do not link all binaries and plugins with libreadline. + + +Changes with Apache Traffic Server 3.2.0 + + *) [TS-1286] Cleanup some code around freelists and allocators. + + +Changes with Apache Traffic Server 3.1.4 + + *) [TS-1281] make check fail on RHEL 5.x. + + *) [TS-1282] Verbosity settings for Via headers is broken. + + *) [TS-1279] Fix build system for gcc < 4.3. + + *) [TS-1277] Fixed issue with IPv6 URLs and remap configuration. + + *) [TS-1195] Support the use of raw IPv6 address in URLs and Host fields. + + *) [TS-1275] Fix startup problem where /var is on a volatile disk. + Author: Eric Connell. 
+ + *) [TS-1274] Transformation plugins can send content-length with + non-identity transfer encoding. Author: Otto van der Schaff. + + *) [TS-1272] workaround for - FATAL: HttpSM.cc:890: failed assert `0` + + *) [TS-1240] Fix race in log buffer queuing code. + + *) [TS-1271] deprecate INKStats API + + *) [TS-1250] Cache inspector does not seem to work properly + + *) [TS-1269] Building outside source tree fails on plugins. + + *) [TS-1222] single tcp connection will limit the cluster throughput. + + *) [TS-475] Accelerated single range requests. + Based on initial work by ericb, with help from bwyatt. + + *) [TS-1236] HTTP Accept filters do not work on Illumos. + + *) [TS-1075] Workarounds for linux auto-port issues in transparent + deployments. + + *) [TS-672] cleanup Win32 references. + + *) [TS-1181] Make the overridable configs work with "byte" configs. + + *) [TS-1252] Fixed include issues when using mgmtapi.h. + + *) [TS-1248] update HTTP status codes and strings. + + *) [TS-1245] proxy.config.http.connect_ports may be '*'. + + *) [TS-1239] TSHttpTxnServerAddrSet implementation. + + *) [TS-1237] custom log field/filtering improvements. + + *) [TS-1090] SO_MARK and IP_TOS support for Linux. + + *) [TS-1238] RAM cache hit rate unexpectedly low with CLFUS. + + *) [TS-1242] Make it build with some more recent automake versions. + + *) [TS-1241] Memory leaks when using TSHttpSchedule(). + Author: Aidan McGurn. + + *) [TS-1163] Support for raw disks larger than 2TB on Linux. + + *) [TS-1230] added a paramter to the configure script to allow overriding + the calculated ARG_MAX value. + + *) [TS-1208] enable check_memory() in traffic_cop for Linux. + + *) [TS-1217] cop cleanup, remove unused variables & defines. + + *) [TS-1209] Allow for background fill even when a transform plugin is + the producer. Author: Robert Logue. + + *) [TS-1229] clean up RecordsCofing.cc, remove unused entries. + + *) [TS-1142] record ram hit in stats. 
+ + *) [TS-1213] update will crash at HttpTransact::process_quick_http_filter. + + *) [TS-1186] Fixed Perl stats API to work with 64-bit stat values + + *) [TS-1227] header_filter "set" operator doesn't work if the header + doesn't already exist. + + *) [TS-1210] remove 3.0.x deprecated APIs + + *) [TS-1225] Remove 32 bit doc_len instances. + + *) [TS-1226] Make header_filter support e.g. '=' characters in header + values. + + *) [TS-1150] Some performance improvements around the heap guard. + + *) [TS-1216] Remove the initializer for some gcc`s limits. + + *) [TS-1205] double free when RecDataSet in cluster mode. + + *) [TS-1220] stats: cleanup and fix the wrong values. + + *) [TS-1212] can not limit ram cache, also fix the stats. + + *) [TS-1214] another race condition in cache init. + + *) [TS-1130] Wrong CAS operation on ink_time_t on 64 bit system. + + *) [TS-1127] Wrong returned value of incoming port address. This + API is deprecated, so I also fixed the regression tests accordingly. + Authors: Yakov Kopel and Leif. + + *) [TS-1211] Read backlog config value to set the listen backlog. + + *) [TS-1202] Install traffic_shell man/doc pages in a more appropriate + location. Author: Igor Brezac. + + *) [TS-1198] ssl crash when certificates are missing. + + *) [TS-1164] a race condition in cache init. + + *) [TS-1079] Add an API function to turn debugging on for specific + transactions/sessions. This also adds a new Debug() functionality in + both core and APIs. Author: Uri Shachar. + + *) [TS-1194] Change conversions to build with gcc-4.6 on OmniOS/Solaris. + Also cleanup a couple of plugins to use our "core" build environment. + + *) [TS-1192] Remove gethostbyname usage in test code + + *) [TS-1147] deprecate records.config SSL configuration + + *) [TS-1121] Make --disable-diags at least disable Debug etc. + + *) [TS-1191] Change defaults for proxy.config.dns.search_default_domains to + not use the search domains in resolv.conf. 
+ + *) [TS-1190] Change defaults for proxy.config.http.share_server_sessions to + have a session pool per net-thread. This is best performance for most + common use cases. + + *) [TS-1189] Build problem with older versions of OpenSSL. + + *) [TS-1178] cop will kill manager & server, even cop itself, in cluster. + + *) [TS-1156] Fix timestamp log fields, and stop supporting network byte + order in various log buffers. See TS-1182 for future enhancements. + + *) [TS-1017] Update logging to be IPv6 compliant, including collation. + + *) [TS-1080] If we run out of Log Buffer slots, we assert. + + *) [TS-1176] Eliminates the need for a delayed "delete" of log buffers. + This was a serious race condition, which was previously solved by delaying + deletes via a ring buffer. + + *) [TS-1036] Improve some squid log compatibility. Suggestions from mnot. + + *) [TS-1092] Remove specific SSL termination mode, we either terminate, or + we do not. + + *) [TS-1173] Improve the comments in remap.config. + + *) [TS-981] Remove the support for libev (for now at least). + + *) [TS-1172] Remove remap/StringHash.{cc,h}, they are not used. + + *) [TS-1171] http_ui cache lookup, double free. + + *) [TS-1168] Change UrlRewrite::BuildTable to be IPv6 compliant. + + *) [TS-1167] Updates parent socks server setup to be IPv6 compliant. + + *) [TS-1166] Remove proxy/Stuffer.[cc,h] because they were unused. + + *) [TS-1162] UnixNetVConnection assertion when accepting a TLS connection + + *) [TS-1135] support wildcard certificates for ServerNameIndication (SNI) + + *) [TS-1140] Combine IP Allow and QuickFilter. + + *) [TS-1159] Add compiler hints to debug logging + + *) [TS-1143] Fixed edge case problems in IpMap. + + *) [TS-1114] Fix to lock vol for CacheVC::write_vector. + + *) [TS-857] Possibly related race in UnixNetVConnection::mainEvent + on inactivity timeout. + + *) [TS-1149] Pretty up automake output. 
+ + +Changes with Apache Traffic Server 3.1.3 + + *) [TS-1145] Additional clang build fixes. Author: Darrin Jewell. + + *) [TS-1144] Fix out of tree builds. Author: Darrin Jewell. + + *) [TS-1138] Fixed off by one range error in IpMap. + + *) [TS-462] Support TLS Server Name Indication (SNI) + + *) [TS-1134] TSNetAcceptNamedProtocol should fail if NPN is not supported. + + *) [TS-1133] Make the max host-header length configure.ac configurable. + + *) [TS-1002] fix custom loggin with cquuc cquup, and introduce cquuh + to recode the client_req_unmapped_url_host. + + *) [TS-701] Remove mgmt/cli/script_configs.sh + + *) [TS-1124] Move regex_remap, header_filter and stats_over_http from + the plugin repo to the main repo. + + *) [TS-1111] fix crash in RangeTransform::handle_event + + *) [TS-1109] fix stack dump crashing + + *) [TS-1123] editline/readline conflicts when building on OSX. + + *) [TS-1116] Fixes for building the source with clang/lvm. + + *) [TS-1115] Fixes for building the source with Intel CC (icc). + + *) [TS-1102] Cleanup of Diagnostics code. Author: Uri Shachar and Leif. + + *) [TS-1117] Remove TS_HAS_PURIFY MACRO + + *) [TS-937] EThread::execute still processing cancelled event + + *) [TS-995] Name change for IP support (ink_inet.h). + + *) [TS-841] support TLS NextProtocol negotiation + + +Changes with Apache Traffic Server 3.1.2 + + *) [TS-1110] logstats incorrectly bucketizes all status codes greater + than 599 as 5xx. Author: Manjesh Nilange + + *) [TS-1094] Fixed MIME parser so certain sequences of requests on + keep alive conections no longer wedge it. + + *) [TS-1084] Add compile-time format string checking. + + *) [TS-1101] traffic_line -x no longer works, at least not in + reasonable time. + + *) [TS-1098] Make RC script support Amazon EC2 Linux AMI. + + *) [TS-1035] EventProcessor::spawn_thread doesn't check that there + is enough event threads and segfaults. + + *) [TS-1096] readline support for traffic_shell. 
+ + *) [TS-1097] online help for traffic_shell. + + *) [TS-1066] TSHttpTxnServerReqHdrBytesGet in InkAPI.cc has an extra + parameter (int *bytes) from the prototype in . + Author: Alistair Stevenson + + *) [TS-1089] Added TSHttpConnectTransparent. + + *) [TS-1088] Added TSHttpTxnOutgoingTransparencySet to API to + control outbound transparency. + + *) [TS-1083] Initial SSL next protocol negotiation support. + + *) [TS-1082] Obey existing optimizer CXXFLAGS and CFLAGS at configure time. + + *) [TS-1077] All proxy ports are now configured by + proxy.config.http.server_ports. All other port configuration values + are deprecated. + + *) [TS-1091] CFLAGS=-w` causes configure script to wrongly guess style of + `gethostbyname_r` on BSD flavors. Author: Marc Abramowitz. + + *) [TS-1073] no_dns_just_forward_to_parent configuration parameter is + ignored/not used. Author: Kevin Giles. + + *) [TS-996] HTTPHdr::m_host goes stale if HdrHeap::evacuate_from_str_heaps + is called. Author: B. Wyatt. + + *) [TS-1041] Populate sockaddr length. Author: James Peach. + + *) [TS-1081] Eliminate an additional copy of the pristine URL string. + + *) [TS-1038] TSHttpTxnErrorBodySet() can leak memory. + Author: Brian Geffon + + *) [TS-1049] TS hangs (dead lock) on HTTPS POST requests. + Author: Wilson Ho + + *) [TS-1056] Lost UA connections can show up as "400 ERR_INVALID_REQ" + in logs. + + *) [TS-1048] Add TS API to enable plugins to use traffic server + configuration infrastructure. Author: Bianca Cooper. + + *) [TS-1074] PluginVC should schedule to the local queue instead of the + external queue. Author: Brian Geffon + + *) [TS-1032] Assertion when upstream connection is established (with event + handled by thread A) and immediately disconnected (handled by thread B). + Author: Uri Shachar. + + *) [TS-1052] trafficserver restart does not work (needs to let the old + process die). Author: Billy Viera + + *) [TS-1044] Fix TSVConn{Read,Write}VIOGet in UnixNetVConnection. 
+ Author: James Peach. + + *) [TS-1040] Teach TSHostLookup to use const. Author: James Peach. + + *) [TS-1071] Debug statement in FetchSM broken. Author: Brian Geffon. + + *) [TS-1057] Expose Base64-encoding through APIs. + Author: Yakov Kopel and leif + + *) [TS-1014] slow log can not print logs well on 32-bit system, + changed the %d to RPI64. Author: weijin. + + *) [TS-992] Various portability fixes. Author: Piotr Sikora + + *) [TS-999] Deprecate TSUrlDestroy(), it's a no-op. Just make sure + to release the marshal buffers as normal. + + *) [TS-245] Add TSStringPercentEncode(), TSUrlPercentEncode(), and + TSStringPercentDecode(). + + *) [TS-1065] traffic_cop segment fault when enable TRACE_LOG_COP. + Author: Conan Wang. + + *) [TS-1029] DNS crash if we free the memory into system. Author: weijin + + *) [TS-1055] Wrong implementation of TSHttpSsnArgGet(). + Author: Yakov Kopel + + *) [TS-992] Portability fixes. Author: Piotr Sikora. + + *) [TS-949] Fix key->vol hash to be consistent when a disk is marked bad. + + *) [TS-1047] fix lots of spelling mistakes Author: Arno Töll + + *) [TS-1042] correct debug message in FetchSM. Author: James Peach + + *) [TS-1039] use pcre-config to find libpcre. Author: James Peach + + *) [TS-1037] Fix for computing local machine address (was ignoring + general addresses). + + *) [TS-1030] Improve hashing mechanism on WKS. + + *) [TS-1028] Avoid triggering assert when running debug build and enabling + per-thread connection pols + + *) [TS-1021] Remove extra newline from binary logs. + + *) [TS-1022] Use size specific types for serialized data in binary logs. + + +Changes with Apache Traffic Server 3.1.1 + + *) [TS-1020] Make logging to a named pipe work on Solaris. + + *) [TS-1016] Make the update frequency for stats configurable. 
+ + *) [TS-944] Align all configurations of paths to use the same function + + *) [TS-1018] Remove obsolete OpenSSL acceleration code and configs + + *) [TS-1013] Allow ssl_multicert.config to support CA chains per host + + *) [TS-971] make cache evacuate work as expect. + + *) [TS-982] Fixed PluginVC set active/passive address. Clarified that + it expects host order input. + + *) [TS-1012] Eliminate proxy.config.http.append_xforwards_header, which + is never used. + + *) [TS-1004] Transformation plugins cause connection close when content + length is not known ahead. Author: Otto van der Schaaf. + + *) [TS-1011] Fixes for OpenSSL, specifically triggered for Solaris, but + generally broken assumptions in the old code. + + *) [TS-989] Logging is now IPv6 compatible. + + NOTE: IP addresses are now encoded as a specific type of binary + data, not a mix of unsigned ints and strings. This is a log binary + format change and therefore WILL BREAK ACCESS TO LOG DATA FROM + PREVIOUS VERSIONS. + + *) [TS-1009] Disable starting ICP continuations if ICP is not enabled. + + *) [TS-1005] Use traffic_line for reload option with RC script. + Author: Jan-Frode Myklebust. + + *) [TS-984] Log roll crash fixed. + + *) [TS-997] ATS crashes on remap plugin initialization failure. + Author: Manjesh Nilange. + + *) [TS-988] Updated ICP for IPv6. + + *) [TS-994] Removed the extra splace from X-Forwarded-For. + + *) [TS-934] Added some wrapping around NetVConnection for server + handling so that connection objects can be safely locked across + threads. + + *) [TS-991] Fixed race / stall condition for WCCP during restart. + + *) [TS-985] ts/ts.h uses C++ comments, which are technically not C. + + *) [TS-928] Compile problem in TsErrataUtil on FreeBSD 8. + + *) [TS-973] Eliminate proxy.config.http.verbose_via_str. + + *) [TS-747] Add a new option, proxy.config.ssl.compression, to turn + SSL compression on / off. This currently only works with OpenSSL + v1.0.0 and later. 
+ + *) [TS-963] Change the way ip_allow is parsed to be consistent with + earlier versions (first match). Added default IPv6 allow. Added + regression tests for the underlying IpMap class. + + *) [TS-948] Don't reload or load a broken remap.config. + + *) [TS-824] Range requests that result in cache refresh give 200 status + response with full contents. Review and suggestions for improvements + by Charlie Gero. + + *) [TS-964] Add 64-bit integer plugin APIs for HTTP headers. + + *) [TS-932] Fix m_pending_event == NULL crash in LogCollationClientSM.cc + Author: weijin + + *) [TS-830] Better error when there are CLI permission problems, or + other problems preventing operation. Author: AdunGaos. + + *) [TS-979] Found a few places where we can segfault with strlcpy. + + *) [TS-938] Fix VIA to avoid loopback address. For Solaris only IPv4 + is supported. + + *) [TS-945] Convert transparent forward requests to server style when + forwarding to a parent proxy. Contributed by Yossi Gottlieb. + + *) [TS-926] IPv6 conversion of iocore. + + *) [TS-967] This is a simplified version of Arno Toell's patch which does + the same: Check if any -O options where given, and if so, use those, if + not, use our default -O3. + + *) [TS-957] remove IE6 from the white list of ae_ua filter + + *) [TS-955] Fix the logging regression testing. + + *) [TS-962] typo of key name in logstats.cc. Author: Nick Berry. + + *) [TS-958] Fix a few valgrind memory check errors. + + *) [TS-567] A number of memory allocation clean up, and improvements. We + now also support building with tcmalloc, jemalloc, and you can also turn + off the freelist feature (for better debugging). + + *) [TS-950] Make the HTTP header regressions work on 32-bit, inefficient, + but it works. + + *) [TS-956] fix the building with zlib-1.2.5.1. + + *) [TS-953] consolidate string copy/concat for examples. + + *) [TS-924] More efficient sharing of origin connections. 
+ + This also overloads the config for shared connections as follows: + + # 0 - Never + # 1 - Share, with a single global connection pool + # 2 - Share, with a connection pool per worker thread + CONFIG proxy.config.http.share_server_sessions INT 1 + + This option is now per-request (remap or plugin) overridable. This is + heavily based on William Bardwells and Weijin's work. + + *) [TS-84] Unify all code to use PATH_NAME_MAX, and increase it to 4K. + + *) [TS-943] Implement support for HTTP accept filters. + + *) [TS-168] revert iObject and other changes, make collation client work + in the clean way. + + *) [TS-941] invalid cast of off_t math to int. Author: B Wyatt. + + *) [TS-940] Add new configuration option, and solaris support, to set + an initial congestion window size, proxy.config.http.server_tcp_init_cwnd. + + *) [TS-931] cluster latency too high, about 24ms. this change will cut + the latency from 20+ms to about 10ms. Author: weijin + + *) [TS-896] When logging config changes, we should check if it is remote + logging and clean up the collation client related data. + + *) [TS-936] Fix problems with core file generation on Linux. + + *) [TS-930] Fixed TSNetConnect to use network order for port. + + *) [TS-1008] Add API to get TCP connection from SSN. + + +Changes with Apache Traffic Server 3.1.0 + + *) Make sure --enable-purify works again + + *) [TS-888] Fix SSL by enabling the right direction on successful setup. + + *) [TS-925] Switch from integer thread IDs to hex thread IDs with the + diagnostics output. Author: Brian Geffon. + + *) [TS-919] All of iocore is now IPv6 compatible. Much of this work + was contributed by Yossi Gottlieb. + + *) [TS-867] moving to a different thread only if the pluginvc is invoked + from a non regular thread. + + *) [TS-876] forward map based on request receive port. Author: Manjesh + Nilange. + + *) [TS-880] Major performance problem with second request on same + keep-alive connection. 
This is a partial fix, another 2x improvement + can be made, but too risky right now. Authors: William Bardwell and + weijin. + + *) [TS-900] TSHttpTxnNewCacheLookupDo (experimental) breaks requests + to origin server. Author: William Bardwell. + + *) [TS-918] Allow interval-based rotation for round robin entries. + Author: M. Nunberg. + + *) [TS-916] TSHttpIsInternalRequest() crashes if client connection is + terminated prematurely. Author: Manjesh Nilange. + + *) [TS-466] Multiline headers handled improperly. + + *) [TS-914] fix iocore_net_main_poll debug info in UnixNet.cc + Author: taorui + + *) [TS-911] Remove unecessary lock in HTTP accept. + + *) [TS-908] HostDB now stores IPv6 addresses. + + *) [TS-813] fix http_ui /stat/ to response with content type + + *) [TS-849] fix some variables for traffic_line -s setting + + *) [TS-874] make asf-dist work with git repo + + *) [TS-906] ATS doesn't use proxy.config.http.forward.proxy_auth_to_parent. + + *) [TS-592] DNS internals are now IPv6 compatible. Externally this means + DNS servers with IPv6 addresses can be used although only IPv4 responses + are useful until other IPv6 upgrades are done. + + *) [TS-907] The source address for DNS requests can be set. + + *) [TS-903] Internal resolver library now IPv6 compatible. + IP address matching libraries removed, replaced with IpMap which is + faster and IPv6 compatible. SOCKS and IpAllow configurations files + will now parse IPv6 ranges, although they do not current have effect. + + *) [TS-901] Valgrind found minor leaks and uninitialized variables. + Author: William Bardwell. + + *) [TS-863] Make proxy.config.http.keep_alive_no_activity_timeout_out + configurable per transaction. Author: William Bardwell. + + *) [TS-859] Make parent proxy not not work anymore. + + *) [TS-889] Disable warnings on deprecated APIs for Darwin (OSX). This + fixes build problesm on OSX 10.7 (Lion) when using the system OpenSSL. 
+ + *) [TS-890] update remap.config comments on regexes to be + accurate. Author: Manjesh Nilange. + + *) TS-885 service trafficserver condrestart does the opposite of what´s + intended. Author: Jan-Frode Myklebust + + *) [TS-898] "fixed" problems reported by Coverity + 1. Changed sprintf to snprintf + 2. ignore error on sscanf that is safe + + *) [TS-807] no config item "proxy.config.hostdb.disable_reverse_lookup" in + hostdb. Author: weijin. + + *) [TS-883] Fix help / usage text to be, ehm, correct. + + *) [TS-567] Cleanup, removing unecessary, and unsupported, debug features. + + *) [TS-848] fix crash in net pages and remove useless option. + + *) IpLookup was removed from the experimental API. + + *) proxy.config.http.cache.cache_responses_to_cookies can now be overridden + on a per request basis in cache.config. + + *) [TS-816] Other ports now obey specified options for both normal + and standalone usage. + + *) [TS-882] traffic_logstats dies when printing log. + + *) [TS-804] libcap required when running standalone. + + *) [TS-730] Allow for the SSL Cipher Suite to be configured. + + *) [TS-881] Better error message from TrafficCop when admin user lookup + fails. + + *) [TS-875] TSFetchRestpGet(), TSFetchPageResptGet() and TSFetchUrl() have + incorrect asserts. Author: Manjesh Nilange. + + *) [TS-853] Fix a few example plugins to use the new (appropriate) sockaddr + based APIs (and not the deprecated APIs). + + *) [TS-870] Fix evacuate relevant codes in cache to work, Author: mohan_zl + + *) [TS-869] The stat code for ram_cache miss is lost, Author: mohan_zl + + *) [TS-873] Wrong code in iocore/net/UnixNet.cc, Author: mohan_zl + + *) [TS-833] Continuation::handleEvent deadbeef fix, authors jplevyak and + taorui. + + *) [TS-834] InactivityCopy::check_inactivity crash. + + *) [TS-864] Need more information from CacheHttpInfo (req time, resp time, + size). Author: William Bardwell. 
+ + *) [TS-860] Built in error for host not found looks like Internet Explorer + error. Author: William Bardwell. + + *) [TS-861] Need a way to disable Vary: Accept-Encoding checking so a plugin + can take care of that. Author: William Bardwell. + + *) [TS-862] Need to be able to make keep alive connections not shared on a + per-transaction basis. Author: William Bardwell. + + *) [TS-865] Need to get address for a VConn from a plugin similar to how you + can get it for the various things in a transaction. Author: William + Bardwell. + + *) [TS-868] build fails with --as-needed. Author: Ilya Barygin. + + *) [TS-851] run TS without a real interface + + *) [TS-822] make cluster thread number configable + + *) [TS-845] make proxy.config.cluster.ethernet_interface default to + loopback interface: lo on linux and lo0 on bsd derivatives + + *) [TS-847] Bad timeout when using CONNECT method. + + *) [TS-826] TSHttpTxnErrorBodySet() can leak memory. Author: William + Bardwell. + + *) [TS-840] Regression checks fail (again) due to faulty assert use. + Author: Arno Toell. + + *) [TS-842] remove uninstall target from build system + + *) [TS-648] Use hwloc library when available. This can be disabled + with --disable-hwloc. + + *) [TS-398] Autoscaling threads vs Hyper Threading. This requires + support for hwloc as well. + + *) [TS-760] Cleanup mgmt types. + + *) [TS-359] Remove DIR_SEP and use slash directly. + + *) [TS-839] Build problems when specifying lmza location. + + *) [TS-828] Various memory leaks and uninitialized values. Author: + William Bardwell. + + *) [TS-815] make sure that a missing yacc/bison|flex/lex is *fatal* when + building with wccp. Author: Arno Toll. + + +Changes with Apache Traffic Server 3.0.0 + + *) [TS-827] TSMimeHdrFieldValueStringInsert() can use freed memory to + edit headers. Author: William Bardwell. + + *) [TS-825] negative caching caches responses that should never be + cached: Author: William Bardwell. 
+ + *) [TS-820] Restore log buffer size to previous defaults. + + *) [TS-818] Assertion/abort when starting TS with SOCKS proxy enabled. + Author: Yakov Markovitch + + *) [TS-810] Typo in switch statement + slight improvement. + + *) [TS-809] ts.h broken when compiling C plugins. + + *) [TS-798] We add broken remap rules when we encounter parse errors of + remap.config. + + +Changes with Apache Traffic Server 2.1.9 + + *) [TS-805] HostDB wastes a lot of storage for round-robin entries, + and the calculations of size are off. + + *) [TS-806] TS_ADDTO removes duplicates, so avoid this by using the + -R option to libtool + + *) [TS-793] Improve print statements for ink_freelist debugging. + + *) [TS-679] The external API was changed to make it IPv6 compliant + (although it doesn't actually work with IPv6). Old API functions + were deprecated but not removed. + + *) [TS-797] Wrong delete used in stats processor. + + *) [TS-769] Fixed infinite loop when getting a 505 response from the + origin and the connection is keep-alive. Now downgrading keep-alive + all the time along with the protocol. + + *) [TS-788] Cleaned up the request and response cacheable apis. + + *) [TS-792] Add a config option (disabled by default) to support + mlock() and mlockall(). + + *) [TS-783] Port ATS to IA64. Author: Arno Toell. + + *) [TS-778] Compile Fails on Solaris 10 (gcc). Author: Igor Brezac. + + *) [TS-404] Add a new API, TSOSIpSet() which allows you to bypass the + origin server DNS lookup. + + *) [TS-791] Remove ShmemClean.cc, it's no longer needed. + + *) [TS-786] Add a perl module to edit a records.config configuration + file. + + *) [TS-779] Set thread name for various event types. + + *) [TS-784] Don not use class allocator for remap processing when no + remap threads are enabled. + + *) [TS-782] Remap processor creates a remap thread even when asked not + to. + + *) [TS-781] Cleanup of unusual configs, and better defaults making + records.config leaner, and a little more useful. 
+ + *) [TS-780] Retune the number of SSL threads. + + *) [TS-775] Disable cluster autodiscovery via multicast when + clustering is disabled. This should hopefully fix run-time errors + with Ubuntu 11.x. + + *) [TS-776] memchr in glibc has evolved, and is faster than our + version, replaced. + + *) [TS-774] Add a new configure option, --enable-static-libts, which + avoids the dynamic linking hassles involved with the dynamic nature + of libts. This is for devs only. + + *) [TS-773] Traffic server has a hard limit of 512 gigabytes per RAW + disk partition. This fix required changing the disk structure which + will result in a total disk cache clear (wipe) after upgrading. + + *) [TS-772] Make proxy.config.http.doc_in_cache_skip_dns overridable. + + *) [TS-770] proxy.config.http.doc_in_cache_skip_dns is not being read + from records.config. Author: Yakov Markovitch + + *) [TS-738] 'make check` fails on x86. + + *) [TS-771] Remove remaining v1 log buffer code. + + *) [TS-562] Make --with-openssl path be honored with an -rpath to + libtool. This also fixes the same problem with other libraries, + e.g. pcre, zlib etc. + + *) [TS-765] Make the backdoor port (8084 by default) only listen on + 127.0.0.1 . + + *) [TS-762] Range values like -10 are processed. Author: William + Bardwell. + + *) [TS-761] Fixed bug where 3 (or more) remap plugins in a chain + couldn't be loaded. + + *) [TS-763] When creating multiple SSL accept threads, we use the + wrong instantiator. + + *) [TS-757] Change TSNetAccept() API to take an option for enabling + (and number of) accept threads. + + *) [TS-759] Makefile in proxy/config handles $DESTDIR incorrectly. + Author: Arno Toell + + +Changes with Apache Traffic Server 2.1.8 + + *) [TS-750] TS does not fail-over if one origin server for a 2 address + hostname. Author: William Bardwell. + + *) [TS-752] If you cancel a scan really quickly you can get a NULL + dereference. 
Also other important performance and correctness fixes + for the cache scanning code. Author: William Bardwell and jplevyak. + + *) [TS-749] Connection hangs if origin server goes down in the middle of + a response. Author: William Bardwell. + + *) [TS-753] TS-753 Some more cleanup in InkAPI, move a few experimental + APIs to ts.h + + *) [TS-751] Experimental TSHttpTxnCacheLookupStatusSet(HIT_STALE) calls + cause a crash. Author: William Bardwell + + *) [TS-748] Client side transparency doesn't work on trunk. + + *) [TS-702] FATAL: MIME.cc:1250: failed assert `j < block_count`. + Author: Yakov Markovitch + + *) [TS-746] Allow to remove URL fields with "NULL" (or 0) values. + + *) [TS-744] Configurations to control SSL session reuse and cache + size. Authors: qianshi and Leif + + *) [TS-716] Bug where NetVC could be double free'd. + Fix for DNS crash: bad memory management of HostEnt structures. It is + not clear that this fixes the bug entirely. Some of the stack traces + are consistent with this bug, but some are not. + + *) [TS-743] Support separate configs for keep-alive enabled for _in + and _out connections. + + *) [TS-741] traffic_manager handles sockets incorrectly. + + *) [TS-742] assert triggered wrongly (in debug builds). + + *) [TS-740] Traffic Server fails to build on kfreebsd. + Author: Arno Toell. + + *) [TS-737] Small hackish fix for rc/trafficserver.in so rc/trafficserver + will work with FreeBSD. Author: G Todd. + + *) [TS-735] Disable ccache by default, use with --enable-ccache. + + *) [TS-734] Remove unused fields in net stats pages. + + *) [TS-212] Startup service support for Solaris. Author: Igor Brezac. + + *) [TS-629] fix some non-portable char == unsigned char assumptions. + + *) [TS-621] Update records.config.default.in with changed / removed + configs. + + *) [TS-641] Remove a bunch of Web UI related configs and code. + + *) [TS-719] libtsutil.so is not self-contained. + Author: Igor Brezac. 
+ + *) [TS-729] Fix bugs with Via Headers handling. (Note: This is + unlikely to have caused the crash the bug report) + Author Leif Hedstrom + + *) [TS-721] Incorrect http hit ratio in stats. + This also removes a number of obsoleted stats and also disables + stats aggregation in WebOverview.cc, one more nail to WebUI's + grave. Author: Leif Hedstrom + + *) [TS-728] Remove the --enable-webui option, since it doesn't + produce a running webui anyway. Also remove html2 + + *) [TS-685] Rename partition.config because it doesn't have + anything todo with disks. Also rename all code related to it + so as not to confuse anybody. + + *) [TS-714] Fix traffic_shell hanging on every command + + *) [TS-562] Fix TCL linking to honor custom library dirs. + Author: Eric Connell. + + *) [TS-624] make install should not overwrite etc/trafficserver/. + Author: Eric Connell. + + *) [TS-465] Allow for existing Server: string to not be overwritten. + This adds a new semantic for the value "2" to this option. + + *) [TS-633] Fix reverse mapping with different schemes. + Author: Andreas Hartke. + + *) [TS-715] Fixes and cleanup for Perl client. Author: Billy Vierra. + + *) TS-550 Remove an unused / unsupported debug tool. Also update the + remap code to use our standard linked list (Queue in this case). + + *) [TS-704] Link traffic_server dynamically to make distros happy, + since --disable-static will work. + + *) [TS-545] Clean out more cruft from MIXT legacy. + + *) [TS-713] Honor the offset within do_io_pread. + + *) [TS-712] Fix compile problems with clang / llvm + + *) [TS-545] parent.config (and perhaps other configs) have an unused + concept of "tags" for MIXT media. Cleanup remaining MIXT junk. + + +Changes with Apache Traffic Server 2.1.7 + + *) [TS-711] Don't schedule remap.config reloads on a network threads. + We now schedule this task on an ET_TASK thread, which avoids blocking + a net-thread for several seconds (when reloading very large remaps). 
+ + *) [TS-710] Do not dlopen / reload a remap plugin .so more than once. + + *) [TS-588] Change Remap plugin APIs to use URL TSMLoc, and normal + ts/ts.h APIs for getting and setting components. + + *) [TS-708] TsConfig doesn't handle backslashes correctly. + + *) [TS-209] add support for raw disk on Solaris: credits: Igor Brezac + for both the code and testing! + + *) [TS-705] Fixes for compiling with gcc v4.6. + + *) [TS-706] hardware sector size's over 8K current report an Error + but are passed through resulting in lots of disk waste. + + *) [TS-707] The random number generator from 1-23-2011 is using the + same seed for all threads = collisions in the cache + + *) [TS-700] Need additional controls in cache.config. + + *) [TS-696] make check fails on libtsutil due to missing libresolv + and librt. Author: Eric Connell. + + *) [TS-691] LogFilter not working for "int" types. + Author: Eric Connell. + + *) [TS-692] Add an experimental API to modify the outgoing IP address. + + *) TS-676: logic in Store::clear and Store::read_config is wrong. + Author: mohan_zl. + + *) [TS-680] Change many typedef void* types to anonymous structs. + + *) [TS-690] Schedule some callbacks on the ET_TASK threads + + *) TS-689 Restore TSMgmtUpdateRegister() to the SDK APIs. + + *) [TS-550] Remove MgmtPlugin.{cc,h}. + + *) [TS-657] Proper validation of RWW settings on startup. + + *) [TS-688] Remove the "tag" modifier from parent.config. + + *) [TS-682] Segfault when partition.config is used. + + *) [TS-687] Build failures on FreeBSD8 + + *) [TS-684] config.layout for gentoo linux, may also be used on Fedora + + *) [TS-675] Make redirect and reverse maps work again. + + +Changes with Apache Traffic Server 2.1.6 + + *) [TS-678] Add a config option for try-lock retry delay. + + This adds a configuration option + + proxy.config.cache.mutex_retry_delay INT 2 + + 2ms seems to be fairly optimal, with little detrimental effect on CPU + usage. We'll fine tune this further in the next release. 
+ + *) [TS-674] Fixes for cache.config and the "modifiers" to work. + + *) [TS-641] Remove inktomi*.css and some files only referenced by it. + Removing mgmt/html2/charting. Remove the now empty mgmt/html2/tune. + + *) [TS-590] Cleanup all SDK APIs to be more consistent. This changes a + large number of APIs, so please check updated docs and signatures in + ts/ts.h. A new tools, tools/apichecker.pl, can be used to help + identifying areas in existing plugins that might need changes. + + *) [TS-673] Make the default configurations more conservative + for when content is cacheable. + + *) [TS-672] Remove unused/unreferenced Win32 header files and + code paths + + *) [TS-671] Detect install group based on install user. + + *) [TS-644] Fix clustered cache pages crash. + + *) [TS-651] Clear all stats when we ask to clear the local stats. + + *) [TS-489] Remove the "connection collapsing" feature, it was poorly + implemented, and caused major problem if enabled. We should redo this + for v3.1 in a way that fits with the HttpSM [author: mohan_zl]. + + *) [TS-668] Add support for URL stats to traffic_logstats. + + *) [TS-665] Remove HTTP_ASSERT from the code base, use standard asserts. + + *) [TS-663, TS-664] Fixes to WCCP with mask assignments, and trunk build + problems. + + *) [TS-662] Make per partition stats for bytes_used work. + + *) [TS-661] Delay the copy of per transaction configs until a plugin + actually tries to modify a setting. We also add these settings to the + list of configurations that is overridable: + + proxy.config.http.cache.max_open_read_retries + proxy.config.http.cache.open_read_retry_time + + *) [TS-660] Cache scan can not be canceled. + + *) [TS-505, TS-506] Changed the defaults to deal with read contention on + the cache, this dramatically improves the performance on cache misses. + + *) [TS-655] Reorganize some code to reduce binary foot prints. + + *) [TS-653] Bogus logcat conversion of squid timestamps. 
+ + *) [TS-643] Unable to purge objects on other servers in full cluster mode. + + *) New 64-bit random generator. + + *) [TS-639] Rename the management APIs from INK* to TS*. + + *) [TS-650] Remove the dead v2 stats code. + + *) [TS-649] Dynamic libraries for mgmt APIs. + + This makes libts -> libtsutil, and we now support making .so's for + libtsutil.so and libtsmgmt.so. All binaries are changed to use this, + except traffic_server which continues to use the libtsutil.a library + (for performance on e.g. 32-bit platforms). + + This also renames the public API include file to be + + #include + + *) [TS-647] Move Layout out of iocore and into lib/ts. + + *) [TS-638] Rename various directories: + proxy/mgmt -> mgmt/ + proxy/mgmt/cop -> cop/ + + All "cli" APIs are now also migrated into mgmt/cli, unified into one + single cli. + + *) [TS-641] Cleanup of Web2/HTML2. + + *) [TS-636, TS-637] Remove various unused source files. + + *) [TS-631] Rename proxy/http2 -> proxy/http and proxy/mgmt2 to proxy/mgmt. + + *) [TS-582] Add an example to records.config for how to bind a specific IP. + + *) [TS-491] Cluster port was activated even with clustering disabled. This fix also + adds monitoring support for the "cli" unix domain socket. + + *) [TS-593] Cleanup of inktomi.com. + + *) [TS-324] Cleaning up some old TCL files and dependencies. + + *) Remove traces of FTP references [TS-324] by purging the now useless TCL + bindings to it. + + *) [TS-513] Fix configure issues for sqlite3. This fix eliminates all of + SimpleDBM, sqlite3 and bdb dependencies. It also fixes the "make + distclean" problem, and clean things up a bit. + + *) [TS-491] Add the CLI interface to Traffic Cop, and make it possible to + run traffic_manager without listening on the cluster port. + + *) [TS-583] Build fails if --disable-webui is added. + + *) [TS-618] Removing traces of CCAS/CCASFLAGS. + + *) [TS-627] Fixes for "make check" to succeed (author: Arno Toell). 
+ + *) [TS-632] Fixes for bad cast and cleanup for Intel CC. + + +Changes with Apache Traffic Server 2.1.5 + + *) More 64-bit issues has been identified in the SDK and HTTP core, and + fixed [TS-620]. + + *) Code cleanup of old transparency code and options [TS-613]. + + *) We now require a compiler (or libc) that provides atomic + operations. This includes gcc 4.1.2 or later, Intel CC, clang (recent + versions) as well as Sun's Solaris compilers and libc [TS-618]. + + *) Support normal default path for remap plugins [TS-616]. + + *) Change default settings for MSIE User-Agent sniffing [TS-615]. + + *) Remove remnants from InktoSwitch. This removes the following + configurations [TS-614]: + + proxy.config.http.inktoswitch_enabled + proxy.config.http.router_ip + proxy.config.http.router_port + + *) Modify TSContSchedule to take a thread type, and add + TSContScheduleEvery [TS-589]. + + *) Added support to allow some select (~50 or so) records.config + configurations to be overridable per transaction. This is done via new SDK + APIs, as well as a remap plugin provided with the core [TS-599]. The new + APIs available are + + TSHttpTxnConfigIntSet() + TSHttpTxnConfigIntGet() + TSHttpTxnConfigFloatSet() + TSHttpTxnConfigFloatGet() + TSHttpTxnConfigStringSet() + TSHttpTxnConfigStringGet() + TSHttpTxnConfigFind() + + *) Eliminate dedicated default DNS for SplitDNS [TS-597]. Author: Zhao + Yongming. + + *) Eliminate proxy.config.net.max_poll_delay configs [TS-605]. + + *) Old traffic_net configurations are eliminated [TS-601]. + + *) Multiple preads: this patch is only active if you call do_io_pread on + the cache. This includes a regression test for do_io_pread which is at + least a smoke test of the new code. [TS-61] + + *) Migrate from home-grown regular expression classes to pcre [TS-604] + + *) Reduce number of calls to regex matcher for standard requests with + well-known-strings (WKS). [TS-603] + + *) Parse Range: requests a bit better. 
Prior, a request like Range:
+ + *) Segfault with HTTPS, fixed by correctly initializing SSLNetVConnection + being added to freelist [TS-559]. + + *) The old logs.config custom log format is no longer supported. Only the + XML custom logs are now supported. This eliminates the config + + proxy.config.log.xml_logs_config + + as well, since it's the only option for custom logs [TS-556]. + + *) All log configurations (and stats) are renamed from log2.* to + log.*. This is to avoid confusion, since Apache Traffic Server never had + the old (obsolete) log system. There's now only one log system, log + [TS-555]. + + *) Many fixes and improvements on the Stats pages subsystem. This now + properly supports (if configured) various internal URLs, like + http://{net}, http://{hostdb} etc. [TS-554]. + + *) The NewCacheVC is removed [TS-551]. + + *) Support for the Alpha processor is eliminated [TS-552]. + + *) A number of unecessary memory allocations are removed, improving + performance under heavy load. [TS-550, TS-549] + + *) All streaming media (MIXT) configurations are now properly removed from + code and default configs [TS-544]. + + *) URL scheme was case sensitive in the cache key [TS-474]. + + *) Fixes for broken API signatures, additions / modifications to the + following API: + + TSReturnCode TSHttpTxnArgSet(TSHttpTxn txnp, int arg_idx, void *arg); + TSReturnCode TSHttpTxnArgGet(TSHttpTxn txnp, int arg_idx, void **argp); + TSReturnCode TSHttpSsnArgSet(TSHttpSsn ssnp, int arg_idx, void *arg); + TSReturnCode TSHttpSsnArgGet(TSHttpSsn ssnp, int arg_idx, void **argp); + + TSReturnCode TSHttpArgIndexReserve(const char* name, const char* + description, int* arg_idx); TSReturnCode TSHttpArgIndexNameLookup(const + char* name, int* arg_idx, const char** description); TSReturnCode + TSHttpArgIndexLookup(int arg_idx, const char** name, const char** + description); + + TSReturnCode TSHttpSsnTransactionCount(TSHttpSsn ssnp, int* count); + + This was all combine into [TS-504], but also see [TS-503]. 
+ + *) Many fixes for broken regression tests! + + *) RNI is now completely cleaned out [TS-536]. + + *) Fixes for SplitDNS (co-author: mohan_zl) [TS-435]. + + *) HTTPS to origin servers, with Chunked responses, would hang [TS-540]. + + *) Mismatched APIs using "unsigned char*" [TS-458]. + + *) Rename / modify TSSetCacheUrl() API, the new prototype is + + TSReturnCode TSCacheUrlSet(TSHttpTxn txnp, const char *url, int + length); + + If length == -1, then the API will calculate it using strlen() [TS-520]. + + *) All public APIs, structs and defines are now prefixed with "TS" instead + of the old "INK". There are two exceptions, for the deprecated INKStats* + and INKCoupledStats* APIs [TS-521]. + + *) The hooks around "remap" has been organized, and a new hook as been + added (for post-remap). New / renamed hooks are + + TS_HTTP_PRE_REMAP_HOOK TS_HTTP_POST_REMAP_HOOK + + In addition, a new API was added, to allow a plugin to skip the remap + phase completely: + + TSReturnCode TSSkipRemappingSet(TSHttpTxn txnp, int flag); + + These fixes went in with [TS-529] and [TS-530]. + + *) INKHttpTxnSetHttpRetStatus not honored when an API transaction is + reenabled with INK_EVENT_HTTP_ERROR [TS-535]. + + *) Various defines for version identification has been moved to the public + ts/ts.h include file, e.g. + + #define TS_VERSION_STRING "2.1.6-unstable" + #define TS_VERSION_NUMBER 2001006 + #define TS_VERSION_MAJOR 2 + #define TS_VERSION_MINOR 1 + #define TS_VERSION_MICRO 6 + + The intended use is for plugins to be able to verify available APIs at + compile time (vs the existing runtime checks) [TS-534]. + + *) Traffic Server should now build on ARM processors. Commit message is + appropriately describing the situation with this CPU: + + This is a sad day of defeat. Not my defeat, but more a collective human + defeat. + + Question: "Chips fabricated today don't have 64bit atomic primitives?" + Answer: "Be sad." 
+ + The ARM box we're working on (armv5tejl) doesn't support any 64bit + primitives. + + This means we need a method of using a global (yes, giant lock of death) + to protect modifications of arbitrary 64bit integers in process space. + We could make this less contentious by allocating pagesize/8 mutexs and + then protecting an int64 based on its page offset. Instead, I think we + should mobilize to burn these architectures to the ground and use public + embarrassment to fix future instruction sets. If another platform has + this issue, we'll want to change the define to: + + TS_ARCHITECTURE_LACKS_64BIT_INSTRUCTIONS and turn on the global death + lock based on that. + + This does not change performance on any other platform -- it's compile + time capital punishment. [TS-533] and [TS-135]. + + *) Very old APIs, that have been deprecated since long before the Apache + Open Source project, are removed. Also, only three public include files + are now available: + + ts/ts.h ts/experimental.h ts/remap.h + + Various other cleanup related to the APIs was also done [TS-522]. + + +Changes with Apache Traffic Server 2.1.4 + + *) Fixes to clustering, that caused an assert to trigger after the stats + changes [TS-519]. + + *) Make the checks when to honor the Content-Length: header less strict, + against origins without Keep-Alive [TS-500]. + + *) Eliminate old ssl_ports feature, it's completely replaced with the + connect_ports configuration [TS-517]. + + *) New script available to help build plugins, tsxs [TS-512]. + + *) Simple, brute force (and efficient) status code stats counters + [TS-509]. + + *) Generalize RecDumpRecordsHt to use RecDumpRecords which is a + callback/map pattern [TS-508]. + + *) Fix plugin APIs to be compatible with the 64-bit changes in the + core. This is an incompatible change with previous releases [TS-14]. + + *) Fixes for stats around origin connection counters, used when allowing + for origin connections to be reused between clients [TS-501]. 
Experimental support for a dedicated DNS thread
+ + *) Remove some remnants of SNMP [TS-401]. + + *) Cleanup of MIX and LDAP/NTLM remnants [TS-443]. + + *) Make the target fragment size configurable for the disk cache. This + adds a new option, proxy.config.cache.target_fragment_size [TS-445]. + This should dramatically improve large file disk performance. + + *) Improve build include dependencies [TS-442]. + + *) Cleanup / fixes for remap plugin chaining [TS-224]. + + *) Support the rc/trafficserver script for FreeBSD [TS-211]. + + *) traffic_shell shows wrong RAM cache size > 2GB [TS-439]. + + *) Better warnings / errors when bad NIC is configured [TS-327]. + + *) Add support for hardware sector sizes 512 - 8192 bytes (e.g. 4096, the + new standard). Autodetected for raw devices on Linux (no support for + other OSes yet), and added a new configuration + + CONFIG proxy.config.cache.force_sector_size INT 4096 + + This change invalidates the entire cache as well, since it's no longer + compatible [TS-43]. + + *) Added APIs to override the cacheablity of the response [TS-395]. + + *) Add OSX support to 'trafficserver' script (author: Dan Mercer) + [TS-210]. + + *) Fix for (very) large buffers fed to the cache [TS-413]. + + *) Forward transparency is available on Linux kernels with TPROXY + [TS-291]. + + *) Fix defaults / max for DNS retries [TS-424]. + + *) Improvements for Perl admin module (author: Adam Faris) [TS-418]. + + *) Problems with specifying separate config files for SSL certificates and + keys [TS-405]. + + *) Logging: Default settings for diagnostic logging [TS-55]. + + *) Fixes to Debian layout (author: Igor Galić) [TS-415]. + + *) Remove DNS proxy support [TS-422]. + + *) rc/trafficserver start/stop quits with bogus status on success (author: + Igor Galić) [TS-429]. + + *) Increase default max in-flight DNS queries [TS-423]. + + *) Update so the pristine URL will work for reverse and forward proxy. + Also, clearing the url on transaction close (author: Wendy Huang) + [TS-410]. 
+ + *) TS fails to use user ID with user name > 8 characters (author: Yakov + Markovitch) [TS-420]. + + *) Duplication of RAM cache hits and miss statistics (reading 2x) (author: + John Plevyak) [TS-453]. + + +Changes with Apache Traffic Server 2.1.2 + + *) Improvements in resilience against DNS poisoning and forging of + response packets [TS-425] and [CVE-2010-2952]. + + *) Segmentation fault in INKError when error output is made both in error + log and as debug messages (author: Yakov Markovitch) [TS-419]. + + *) Debian layout for config.layout (author: Igor Galic) [TS-415]. + + *) Eliminate extraneous stats thread [TS-411]. + + *) CACHE_FRAG_TYPE is now not a power of 2 [TS-76]. + + *) Remove unnecessary stats update [TS-390]. + + *) Get basic features to compile with Intel CC [TS-400]. + + *) More 64 bit issues, this time in the PluginVC code [TS-380]. + + *) Add configure option to enable detailed logging [TS-392]. + + *) Make sure to honor user settings for "dirs" (author: Theo Schlossnagle) + [TS-399]. + + *) Errors on failing to bind / listen on a specified port [TS-247]. + + *) Exempt quick filter for 127.0.0.1 [TS-397]. + + *) Cleanup after "layout changes" (author: Zhao Yongming) [TS-389]. + + *) Fix remaining (non-API) INK64 etc. [TS-365]. + + *) Segfault when using show:network [TS-109]. + + *) Update all examples to use non-deprecated APIs [TS-266]. + + *) Do some cleanup on Connection::fast_connect and + Connection::bind_connect (author: Alan M. Carrol) [TS-320]. + + *) Remove LLONG config option [Ts-364]. + + *) Cleanup some proxy/mgmt2/tools [TS-16]. + + *) Cleanup a little more of webui [TS-91]. + + *) TCL missing [TS-326]. + + *) logstats does not work with layout changes (author: Zhao Yongming) + [TS-385]. + + *) Convert bogus IOCORE_MachineFatal and IOCORE_ProcessFatal to Warning + and MachineFatal respectively based on review of the code and related + uses [TS-144]. 
+ + *) INKIOBufferReaderCopy, INKIOBufferWrite should take void * instead of + char * [TS-67]. + + *) Adds APIs for aio disk read and writes using the internal aio support + in iocore (author: Wendy Huang) [TS-387]. + + *) Solaris 10 (x86) 64-bit patch (author: Igor Brezac) [TS-388]. + + *) Fix for 64-bit conversion [TS-385]. + + *) Creating transaction specific 'to URL' in case of regex remap match + [TS-384]. + + *) Backing out m_capacity_host changes [TS-383] + + *) Solaris 10 port work. + + +Changes with Apache Traffic Server 2.1.1 + + *) Allow SI decimal multipliers for numeric configuration parameters + [TS-361]. + + *) Standardize configure options by allowing to specify the location for + any third-party library, and split library detection code into separate + .m4 files [TS-345]. + + *) Reorganization of the path layout system. Add --enable-layout=LAYOUT + configure option that can select layout from config.layout file + [TS-280]. + + *) HTTP state machine is now 64-bit "clean", allowing for caching and + proxying documents larger than 2GB [TS-34]. + + *) Fix for truncated Content-Type on TS-generated responses [TS-290]. + + *) Performance improvements on cache for larger(ish) objects. + + +Changes with Apache Traffic Server 2.1.0 + + *) Support for many more platforms, including FreeBSD, MacOSX and Solaris. + + *) Code cleanup to get the ATS software into a distributable shape. This + means that certain things are missing, or not functional (intentionally). + + *) Support for larger Cache Partitions up to .5 PB (Petabytes), reducing + seeks/write. + + *) Reduced Cache miss latency (sub millesecond). + + *) RAM Cache pluggability, new algorithm (CLFUS) and optional compression. + + *) Support for TCL v8.6 and later [TS-246]. + + *) The cache is now 64-bit "clean". + + +Changes with Apache Traffic Server 2.0.1 + + *) Port of CVE-2010-2952 for 2.0.x [TS-425]. 
+ + *) Backport part of TS-322 that deals with indexing arrays with char + (author: Marcus Ruckert) [TS-334]. + + *) Backport TS-336 to 2.0.x. Problems with make install DESTDIR=... + + +Changes with Apache Traffic Server 2.0.0 + + *) Change SDK versioning schemes to 2.0 [TS-249). + + *) Minor additions to the SDK (see the docs for details). + + *) Support regexe_map rules in remap.config [TS-80] + + +Changes with Apache Traffic Server 2.0.0-alpha + + *) Code cleanup to get the ATS software into a distributable shape. This + means that certain things are missing, or not functional (intentionally). + + *) Ports available for most Linux distros, including 64-bit. diff --git a/CRUFT.txt b/CRUFT.txt new file mode 100644 index 00000000..c8e06f2c --- /dev/null +++ b/CRUFT.txt @@ -0,0 +1,49 @@ +CRUFT +***** + +This file is designated for tracking cruft in our code. That is, code paths +or ugly hacks that were put into place for a reason: Getting stuff to work +but which could either been done better, or which time will obsolete. + +Examples are crude workarounds for broken compilers, libraries, OSes, or +hardware, or or massive ``#ifdef`` clusters, that are better abstracted away +into autoconf and convenience wrappers. + +If you are new to the project this is a good place to look for explanations +of why things are the way they are, or help us fix things and make the code +easier to read and maintain. + + +Store.cc +======== +``iocore/cache/Store.cc`` contains three different ``#ifdef`` clusters with +near identical code. Often you will read the same confused comment in three +places, see for instance: http://issues.apache.org/jira/browse/TS-1707 + +Web2 +==== +``mgmt/web2`` is very old, very broken, very deeply infested code that needs +to be removed as it is no longer appropriate, or functional. +Please see https://issues.apache.org/jira/browse/TS-641 for further refernece. 
+ +Java +==== + +we should get rid of: ./example/protocol/test/*.java because, as zwoop says +"friends don't let friends write code in Java". + +Plugins +======= + +``geoip_acl`` should be a "helper plugin:, or a library/API which other +plugins can use. + + +Configuration & Defaults +======================== + +Right now our server doesn't work without a reasonably filled records.config +There are varying opinions on how this could or should be fixed, however one +issue that arrises is that default configuration options are often set ad-hoc +where ever they are needed as #defines. This should instead be consolidated +into a single place. diff --git a/INSTALL b/INSTALL new file mode 100644 index 00000000..597bb565 --- /dev/null +++ b/INSTALL @@ -0,0 +1,88 @@ +APACHE TRAFFIC SERVER UNIX QUICKSTART +------------------------------------- + +For complete installation documentation, see the on-line documentation at + + http://trafficserver.apache.org/docs + + +Various popular platforms have up-to-date build instructions available +online, at + + https://cwiki.apache.org/confluence/display/TS/Building + + +BUILD AND INSTALLATION +---------------------- + +Configure & Build from Git source tree: + $ autoreconf -if # generate the configure script and Makefile.in files + +On Linux, OSX: + $ ./configure [--prefix=PREFIX] + $ make + +On OpenSolaris and FreeBSD: + $ ./configure [--prefix=PREFIX] + $ gmake + +To install: + $ sudo make install + +* Building on FreeBSD requires GNU make. This is the preferred make + on all platforms. + +* Replace PREFIX with the filesystem path under which Traffic Server + should be installed. Default PREFIX is "/usr/local" (without the + quotes). + +* If you want to build using different layout see the config.layout + file for possible alternatives. You can create your own layout + and enable it with ./configure --enable-layout=ID, where ID is + the layout name you have created. 
+ +* The easiest way to find all of the configuration flags + for Traffic Server is to run ./configure --help. + + +By default the build will use the highest level of compiler +optimization. To alter this, specify your own optimizer flags at +configuration time: + + $ ./configure CXXFLAGS=-Os CFLAGS=-Os + +To compile with an alternate set of compilers, e.g. LLVM/Clang + + $ ./configure CC=clang CXX=clang++ + + +INITIAL CONFIGURATION +--------------------- + +Next set the cluster interface in 'records.config'. The network +interface name will vary depending on the host operating system. + +For example: + Linux: CONFIG proxy.config.cluster.ethernet_interface STRING eth0 + OS X: CONFIG proxy.config.cluster.ethernet_interface STRING en0 + FreeBSD: CONFIG proxy.config.cluster.ethernet_interface STRING em0 + OpenSolaris: CONFIG proxy.config.cluster.ethernet_interface STRING e1000g0 + +To start the Traffic Server process stack (TC, TM, TS): + $ sudo PREFIX/bin/trafficserver start + + +INSTALLATION HELP +----------------- + +The Apache Traffic Server group cannot field user's installation +questions. There are many valuable forums to help you get started. +Please refer your questions to the appropriate forum, such as the +Users Mailing List at + + http://cwiki.apache.org/confluence/display/TS/Traffic+Server + +Thanks for using the Apache Traffic Server, version 5. + + The Apache Software Foundation + http://www.apache.org/ diff --git a/LAYOUT b/LAYOUT new file mode 100644 index 00000000..a6fb1d3f --- /dev/null +++ b/LAYOUT @@ -0,0 +1,32 @@ +The Traffic Server 3.x Default LAYOUT +-------------------------------- + +` ........................... Top-Level Traffic Server Directory +| +| +|-- bin ..................... Binaries +|-- etc ..................... 
Configuration files +| `-- trafficserver +| |-- body_factory +| | `-- default +| |-- internal +| `-- snapshots +|-- include +| `-- ts +|-- lib +| `-- trafficserver +|-- libexec +| `-- trafficserver +|-- share +| `-- trafficserver +| |-- configure +| | `-- helper +| |-- images +| |-- include +| |-- monitor +| `-- mrtg +`-- var + |-- log + | `-- trafficserver ... Log files + `-- trafficserver ....... Runtime data + diff --git a/LICENSE b/LICENSE new file mode 100644 index 00000000..b2e6b61e --- /dev/null +++ b/LICENSE @@ -0,0 +1,427 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +~~~ + +Copyright (C) 2009 Yahoo! Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +~~~ + +Mersenne Twister License + + Copyright (C) 2004, Makoto Matsumoto and Takuji Nishimura, + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. The names of its contributors may not be used to endorse or promote + products derived from this software without specific prior written + permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR + CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +~~~ + +TK 8.3 License + +This software is copyrighted by the Regents of the University of +California, Sun Microsystems, Inc., and other parties. The following +terms apply to all files associated with the software unless explicitly +disclaimed in individual files. + +The authors hereby grant permission to use, copy, modify, distribute, +and license this software and its documentation for any purpose, provided +that existing copyright notices are retained in all copies and that this +notice is included verbatim in any distributions. No written agreement, +license, or royalty fee is required for any of the authorized uses. +Modifications to this software may be copyrighted by their authors +and need not follow the licensing terms described here, provided that +the new terms are clearly indicated on the first page of each file where +they apply. + +IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY +FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES +ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY +DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES, +INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. 
THIS SOFTWARE +IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE +NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR +MODIFICATIONS. + +GOVERNMENT USE: If you are acquiring this software on behalf of the +U.S. government, the Government shall have only "Restricted Rights" +in the software and related documentation as defined in the Federal +Acquisition Regulations (FARs) in Clause 52.227.19 (c) (2). If you +are acquiring the software on behalf of the Department of Defense, the +software shall be classified as "Commercial Computer Software" and the +Government shall have only "Restricted Rights" as defined in Clause +252.227-7013 (c) (1) of DFARs. Notwithstanding the foregoing, the +authors grant the U.S. Government and others acting in its behalf +permission to use and distribute the software in accordance with the +terms specified in this license. + +~~~ + +BIND license + +Copyright (c) 1985, 1989, 1993 + The Regents of the University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. +3. Neither the name of the University nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +SUCH DAMAGE. + + + +Portions Copyright (c) 1993 by Digital Equipment Corporation. + +Permission to use, copy, modify, and distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies, and that +the name of Digital Equipment Corporation not be used in advertising or +publicity pertaining to distribution of the document or software without +specific, written prior permission. + +THE SOFTWARE IS PROVIDED "AS IS" AND DIGITAL EQUIPMENT CORP. DISCLAIMS ALL +WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES +OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL DIGITAL EQUIPMENT +CORPORATION BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL +DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR +PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +SOFTWARE. + + + +Copyright (c) 2004 by Internet Systems Consortium, Inc. ("ISC") +Portions Copyright (c) 1996-1999 by Internet Software Consortium. + +Permission to use, copy, modify, and distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. 
+ +THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT +OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +~~~ + +Copyright (c) 1994-2011 John Bradley Plevyak, All Rights Reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. +3. The name of the author may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +SUCH DAMAGE. 
+ +~~~ + +Copyright (C) 2007 Ariya Hidayat (ariya@kde.org) +Copyright (C) 2006 Ariya Hidayat (ariya@kde.org) +Copyright (C) 2005 Ariya Hidayat (ariya@kde.org) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + +For the strlcat, strlcpy in inktomi++/ink_string.cc: + +Copyright (c) 1998 Todd C. Miller + +Permission to use, copy, modify, and distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + diff --git a/Makefile.am b/Makefile.am new file mode 100644 index 00000000..0be55a6e --- /dev/null +++ b/Makefile.am @@ -0,0 +1,120 @@ +# +# Top-level Makefile.am for Traffic Server. +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Local Macros +# http://www.gnu.org/software/automake/manual/automake.html#Local-Macros +ACLOCAL_AMFLAGS = -I build + +export CCACHE_BASEDIR + +# TODO: There's still some weirdness in the dependencies between proxy +# and mgmt, hence we have to build proxy/hdrs first. + +# proxy/api/ts has to be built first, since so much of libraries and "core +# depends on the generated ts/ts.h include file. 
+ +SUBDIRS = proxy/api/ts iocore lib proxy/hdrs proxy/shared mgmt proxy cmd plugins tools example rc doc + + +DIST_BUILD_USER=`id -nu` +DIST_BUILD_USER_GROUP=`id -ng` +DISTCHECK_CONFIGURE_FLAGS=--with-user=${DIST_BUILD_USER} --with-group=${DIST_BUILD_USER_GROUP} + +EXTRA_DIST=CHANGES INSTALL STATUS NOTICE LAYOUT LICENSE example contrib README-EC2 REVIEWERS + +# Default value when creating release candidates +RC = 0 + +# igalic can't remember if this is make check or make test and neither should you. +test: check + +installcheck-local: + $(DESTDIR)$(bindir)/traffic_server -R 1 + +distclean-local: + -rm -f config.nice + +doxygen: + @cd doc && $(MAKE) $(AM_MAKEFLAGS) $@ + +asf-dist: asf-distdir + tardir=$(distdir) && $(am__tar) --mtime=./configure.ac | bzip2 -9 -c >$(distdir).tar.bz2 + @$(am__remove_distdir) + +asf-dist-rc: asf-distdir + tardir=$(distdir) && $(am__tar) --mtime=./configure.ac | bzip2 -9 -c >$(distdir)-rc$(RC).tar.bz2 + @$(am__remove_distdir) + +asf-distdir: + @$(am__remove_distdir) + test -d .git && git clone . 
$(distdir) + cd $(distdir) && git submodule update --init && autoreconf -i + rm -rf -- $(distdir)/autom4te.cache $(distdir)/.git $(distdir)/.gitignore $(distdir)/ci + +asf-dist-sign: asf-dist + md5sum -b $(distdir).tar.bz2 >$(distdir).tar.bz2.md5 + sha1sum -b $(distdir).tar.bz2 >$(distdir).tar.bz2.sha1 + gpg --armor --output $(distdir).tar.bz2.asc --detach-sig $(distdir).tar.bz2 + +asf-dist-sign-rc: asf-dist-rc + md5sum -b $(distdir)-rc$(RC).tar.bz2 >$(distdir)-rc$(RC).tar.bz2.md5 + sha1sum -b $(distdir)-rc$(RC).tar.bz2 >$(distdir)-rc$(RC).tar.bz2.sha1 + gpg --armor --output $(distdir)-rc$(RC).tar.bz2.asc --detach-sig $(distdir)-rc$(RC).tar.bz2 + +release: asf-dist-sign + git tag -fs -m "Release $(VERSION)" $(VERSION) + +rel-candidate: asf-dist-sign-rc + git tag -fs -m "Release Candidate $(VERSION)-rc$(RC)" $(VERSION)-rc$(RC) + +examples: all + @cd example && $(MAKE) $(AM_MAKEFLAGS) + +install-examples: examples + @cd example && $(MAKE) $(AM_MAKEFLAGS) install pkglibdir=$(pkglibexecdir) + +install-data-hook: + @cd doc && $(MAKE) $(AM_MAKEFLAGS) install-man + +rat: + java -jar $(top_srcdir)/ci/apache-rat-0.11-SNAPSHOT.jar -E $(top_srcdir)/ci/rat-regex.txt -d $(top_srcdir) + +submodules: + git submodule update --init + +help: + @echo 'all default target for building the package' && \ + echo 'check run the test suite, if any' && \ + echo 'clean remove whatever make created' && \ + echo 'distclean remove whatever configure created' && \ + echo 'dist DEPRECATED: recreate source package' && \ + echo 'examples make examples' && \ + echo 'asf-dist recreate source package' && \ + echo 'asf-dist-sign recreate source package, with checksums and signature' && \ + echo 'release recreate a signed release source package and a signed git tag' && \ + echo 'rel-candidate recreate a signed release candidate source package and a signed git tag' && \ + echo 'distcheck verify dist by performing VPATH build and then distclean' && \ + echo 'rat produce a RAT licence compliance report of 
the source' && \ + echo 'submodules update the git submodules' && \ + echo 'doxygen generate doxygen docs in doc/html dir' && \ + echo 'help display this list of make targets' && \ + echo 'install install by copying the built files to system-wide dirs' && \ + echo 'install-strip same as install but then strips debugging symbols' && \ + echo 'install-examples install examples by copying the built files to system-wide dirs' + diff --git a/Makefile.in b/Makefile.in new file mode 100644 index 00000000..62754da8 --- /dev/null +++ b/Makefile.in @@ -0,0 +1,1146 @@ +# Makefile.in generated by automake 1.13.4 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# +# Top-level Makefile.am for Traffic Server. +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) ;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgincludedir = $(includedir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = . 
+DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ + $(top_srcdir)/configure $(am__configure_deps) \ + $(top_srcdir)/lib/perl/lib/Apache/TS.pm.in INSTALL README \ + build/aux/ar-lib build/aux/compile build/aux/config.guess \ + build/aux/config.sub build/aux/install-sh build/aux/missing \ + build/aux/ltmain.sh $(top_srcdir)/build/aux/ar-lib \ + $(top_srcdir)/build/aux/compile \ + $(top_srcdir)/build/aux/config.guess \ + $(top_srcdir)/build/aux/config.sub \ + $(top_srcdir)/build/aux/install-sh \ + $(top_srcdir)/build/aux/ltmain.sh \ + $(top_srcdir)/build/aux/missing +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/ax_boost_base.m4 \ + $(top_srcdir)/build/ax_check_openssl.m4 \ + $(top_srcdir)/build/ax_compare_version.m4 \ + $(top_srcdir)/build/ax_compiler_vendor.m4 \ + $(top_srcdir)/build/ax_cxx_compile_stdcxx_11.m4 \ + $(top_srcdir)/build/ax_lib_curl.m4 \ + $(top_srcdir)/build/ax_lib_readline.m4 \ + $(top_srcdir)/build/ax_path_generic.m4 \ + $(top_srcdir)/build/ax_perl_modules.m4 \ + $(top_srcdir)/build/ax_with_curses.m4 \ + $(top_srcdir)/build/common.m4 $(top_srcdir)/build/crypto.m4 \ + $(top_srcdir)/build/jemalloc.m4 $(top_srcdir)/build/libtool.m4 \ + $(top_srcdir)/build/ltoptions.m4 \ + $(top_srcdir)/build/ltsugar.m4 \ + $(top_srcdir)/build/ltversion.m4 \ + $(top_srcdir)/build/lt~obsolete.m4 $(top_srcdir)/build/lzma.m4 \ + $(top_srcdir)/build/network.m4 $(top_srcdir)/build/pcre.m4 \ + $(top_srcdir)/build/pkg.m4 $(top_srcdir)/build/tcl.m4 \ + $(top_srcdir)/build/tcmalloc.m4 $(top_srcdir)/build/xml.m4 \ + $(top_srcdir)/build/zlib.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +am__CONFIG_DISTCLEAN_FILES = config.status config.cache config.log \ + configure.lineno config.status.lineno +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/lib/ts/ink_autoconf.h +CONFIG_CLEAN_FILES = lib/perl/lib/Apache/TS.pm +CONFIG_CLEAN_VPATH_FILES = +AM_V_P 
= $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ + ctags-recursive dvi-recursive html-recursive info-recursive \ + install-data-recursive install-dvi-recursive \ + install-exec-recursive install-html-recursive \ + install-info-recursive install-pdf-recursive \ + install-ps-recursive install-recursive installcheck-recursive \ + installdirs-recursive pdf-recursive ps-recursive \ + tags-recursive uninstall-recursive +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ + distclean-recursive maintainer-clean-recursive +am__recursive_targets = \ + $(RECURSIVE_TARGETS) \ + $(RECURSIVE_CLEAN_TARGETS) \ + $(am__extra_recursive_targets) +AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ + cscope distdir dist dist-all distcheck +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +# Read a list of newline-separated strings from the standard input, +# and print each of them once, without duplicates. Input order is +# *not* preserved. +am__uniquify_input = $(AWK) '\ + BEGIN { nonempty = 0; } \ + { items[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in items) print i; }; } \ +' +# Make sure the list of sources is unique. This is necessary because, +# e.g., the same source file might be shared among _SOURCES variables +# for different programs/libraries. 
+am__define_uniq_tagged_files = \ + list='$(am__tagged_files)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | $(am__uniquify_input)` +ETAGS = etags +CTAGS = ctags +CSCOPE = cscope +DIST_SUBDIRS = $(SUBDIRS) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +distdir = $(PACKAGE)-$(VERSION) +top_distdir = $(distdir) +am__remove_distdir = \ + if test -d "$(distdir)"; then \ + find "$(distdir)" -type d ! -perm -200 -exec chmod u+w {} ';' \ + && rm -rf "$(distdir)" \ + || { sleep 5 && rm -rf "$(distdir)"; }; \ + else :; fi +am__post_remove_distdir = $(am__remove_distdir) +am__relativize = \ + dir0=`pwd`; \ + sed_first='s,^\([^/]*\)/.*$$,\1,'; \ + sed_rest='s,^[^/]*/*,,'; \ + sed_last='s,^.*/\([^/]*\)$$,\1,'; \ + sed_butlast='s,/*[^/]*$$,,'; \ + while test -n "$$dir1"; do \ + first=`echo "$$dir1" | sed -e "$$sed_first"`; \ + if test "$$first" != "."; then \ + if test "$$first" = ".."; then \ + dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ + dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ + else \ + first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ + if test "$$first2" = "$$first"; then \ + dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ + else \ + dir2="../$$dir2"; \ + fi; \ + dir0="$$dir0"/"$$first"; \ + fi; \ + fi; \ + dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ + done; \ + reldir="$$dir2" +DIST_ARCHIVES = $(distdir).tar.gz +GZIP_ENV = --best +DIST_TARGETS = dist-gzip +distuninstallcheck_listfiles = find . -type f -print +am__distuninstallcheck_listfiles = $(distuninstallcheck_listfiles) \ + | sed 's|^\./|$(prefix)/|' | grep -v '$(infodir)/dir$$' +distcleancheck_listfiles = find . 
-type f -print +pkgdatadir = @pkgdatadir@ +pkglibdir = @pkglibdir@ +pkglibexecdir = @pkglibexecdir@ +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_CFLAGS = @AM_CFLAGS@ +AM_CXXFLAGS = @AM_CXXFLAGS@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +API_DEFS = @API_DEFS@ +AR = @AR@ +ASCPP = @ASCPP@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +BOOST_CPPFLAGS = @BOOST_CPPFLAGS@ +BOOST_LDFLAGS = @BOOST_LDFLAGS@ +CC = @CC@ +CCACHE = @CCACHE@ +CCAS = @CCAS@ +CCASDEPMODE = @CCASDEPMODE@ +CCASFLAGS = @CCASFLAGS@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CURL_CFLAGS = @CURL_CFLAGS@ +CURL_CONFIG = @CURL_CONFIG@ +CURL_LIBS = @CURL_LIBS@ +CURSES_LIB = @CURSES_LIB@ +CXX = @CXX@ +CXXCPP = @CXXCPP@ +CXXDEPMODE = @CXXDEPMODE@ +CXXFLAGS = @CXXFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +DLLTOOL = @DLLTOOL@ +DOXYGEN = @DOXYGEN@ +DSYMUTIL = @DSYMUTIL@ +DUMPBIN = @DUMPBIN@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +EXPAT_LDFLAGS = @EXPAT_LDFLAGS@ +EXTRA_CC_LDFLAGS = @EXTRA_CC_LDFLAGS@ +EXTRA_CXX_LDFLAGS = @EXTRA_CXX_LDFLAGS@ +FGREP = @FGREP@ +FLEX_CFLAGS = @FLEX_CFLAGS@ +GEOIP_LIBS = @GEOIP_LIBS@ +GREP = @GREP@ +HOST_GUESS = @HOST_GUESS@ +HWLOC_CFLAGS = @HWLOC_CFLAGS@ +HWLOC_LIBS = @HWLOC_LIBS@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +LD = @LD@ +LDFLAGS = @LDFLAGS@ +LEX = @LEX@ +LEXLIB = @LEXLIB@ +LEX_OUTPUT_ROOT = @LEX_OUTPUT_ROOT@ +LIBCAP = @LIBCAP@ +LIBCRYPT = @LIBCRYPT@ +LIBDEMANGLE = @LIBDEMANGLE@ +LIBEXPAT = @LIBEXPAT@ +LIBLZMA = @LIBLZMA@ +LIBOBJS = @LIBOBJS@ +LIBPCRE = @LIBPCRE@ +LIBPROFILER = @LIBPROFILER@ +LIBREADLINE = @LIBREADLINE@ +LIBRESOLV = @LIBRESOLV@ +LIBS = @LIBS@ +LIBTCL = @LIBTCL@ +LIBTOOL = @LIBTOOL@ +LIBTOOL_LINK_FLAGS = @LIBTOOL_LINK_FLAGS@ +LIBZ = @LIBZ@ +LIPO = @LIPO@ +LN_S = @LN_S@ 
+LTLIBOBJS = @LTLIBOBJS@ +LUA_LDFLAGS = @LUA_LDFLAGS@ +MAKEINFO = @MAKEINFO@ +MANIFEST_TOOL = @MANIFEST_TOOL@ +MGMT_DEFS = @MGMT_DEFS@ +MKDIR_P = @MKDIR_P@ +NM = @NM@ +NMEDIT = @NMEDIT@ +OBJDUMP = @OBJDUMP@ +OBJEXT = @OBJEXT@ +OPENSSL_INCLUDES = @OPENSSL_INCLUDES@ +OPENSSL_LDFLAGS = @OPENSSL_LDFLAGS@ +OPENSSL_LIBS = @OPENSSL_LIBS@ +OTOOL = @OTOOL@ +OTOOL64 = @OTOOL64@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PCRE_CONFIG = @PCRE_CONFIG@ +PERL = @PERL@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PYTHON = @PYTHON@ +PYTHON_EXEC_PREFIX = @PYTHON_EXEC_PREFIX@ +PYTHON_PLATFORM = @PYTHON_PLATFORM@ +PYTHON_PREFIX = @PYTHON_PREFIX@ +PYTHON_VERSION = @PYTHON_VERSION@ +RANLIB = @RANLIB@ +RM = @RM@ +SED = @SED@ +SET_MAKE = @SET_MAKE@ +SHARED_CFLAGS = @SHARED_CFLAGS@ +SHARED_CXXFLAGS = @SHARED_CXXFLAGS@ +SHARED_CXXLINKFLAGS = @SHARED_CXXLINKFLAGS@ +SHARED_LDFLAGS = @SHARED_LDFLAGS@ +SHELL = @SHELL@ +SPDYLAY_CFLAGS = @SPDYLAY_CFLAGS@ +SPDYLAY_LIBS = @SPDYLAY_LIBS@ +SPHINXBUILD = @SPHINXBUILD@ +SPHINXOPTS = @SPHINXOPTS@ +STRIP = @STRIP@ +TCL_BIN_DIR = @TCL_BIN_DIR@ +TCL_LIB_FILE = @TCL_LIB_FILE@ +TCL_LIB_FLAG = @TCL_LIB_FLAG@ +TCL_LIB_SPEC = @TCL_LIB_SPEC@ +TCL_PATCH_LEVEL = @TCL_PATCH_LEVEL@ +TCL_SRC_DIR = @TCL_SRC_DIR@ +TCL_STUB_LIB_FILE = @TCL_STUB_LIB_FILE@ +TCL_STUB_LIB_FLAG = @TCL_STUB_LIB_FLAG@ +TCL_STUB_LIB_SPEC = @TCL_STUB_LIB_SPEC@ +TCL_VERSION = @TCL_VERSION@ +TFW_PACKAGE_SUFFIX = @TFW_PACKAGE_SUFFIX@ +TS_LIBTOOL_VERSION = @TS_LIBTOOL_VERSION@ +TS_LUAJIT_LDFLAGS = @TS_LUAJIT_LDFLAGS@ +TS_MAN1_MANPAGES = @TS_MAN1_MANPAGES@ +TS_MAN3_MANPAGES = @TS_MAN3_MANPAGES@ +TS_MAN5_MANPAGES = @TS_MAN5_MANPAGES@ +TS_MAN8_MANPAGES = @TS_MAN8_MANPAGES@ +TS_VERSION_MAJOR = @TS_VERSION_MAJOR@ 
+TS_VERSION_MICRO = @TS_VERSION_MICRO@ +TS_VERSION_MINOR = @TS_VERSION_MINOR@ +TS_VERSION_NUMBER = @TS_VERSION_NUMBER@ +TS_VERSION_STRING = @TS_VERSION_STRING@ +VERSION = @VERSION@ +YACC = @YACC@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_AR = @ac_ct_AR@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_CXX = @ac_ct_CXX@ +ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ +ac_cv_sizeof_voidp = @ac_cv_sizeof_voidp@ +allocah = @allocah@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +arpa_ineth = @arpa_ineth@ +arpa_nameser_compath = @arpa_nameser_compath@ +arpa_nameserh = @arpa_nameserh@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_group = @build_group@ +build_machine = @build_machine@ +build_os = @build_os@ +build_person = @build_person@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +cpioh = @cpioh@ +ctypeh = @ctypeh@ +datadir = @datadir@ +datarootdir = @datarootdir@ +default_loopback_iface = @default_loopback_iface@ +defer_accept = @defer_accept@ +docdir = @docdir@ +dvidir = @dvidir@ +enable_remote_cov_commit = @enable_remote_cov_commit@ +endianh = @endianh@ +exec_prefix = @exec_prefix@ +execinfoh = @execinfoh@ +exp_bindir = @exp_bindir@ +exp_cachedir = @exp_cachedir@ +exp_datadir = @exp_datadir@ +exp_docdir = @exp_docdir@ +exp_exec_prefix = @exp_exec_prefix@ +exp_includedir = @exp_includedir@ +exp_infodir = @exp_infodir@ +exp_installbuilddir = @exp_installbuilddir@ +exp_libdir = @exp_libdir@ +exp_libexecdir = @exp_libexecdir@ +exp_localstatedir = @exp_localstatedir@ +exp_logdir = @exp_logdir@ +exp_mandir = @exp_mandir@ +exp_prefix = @exp_prefix@ +exp_runtimedir = @exp_runtimedir@ +exp_sbindir = @exp_sbindir@ +exp_sysconfdir = @exp_sysconfdir@ +expath = @expath@ +floath = @floath@ +gethostbyname_r_glibc2 = @gethostbyname_r_glibc2@ 
+has_128bit_cas = @has_128bit_cas@ +has_backtrace = @has_backtrace@ +has_in6_is_addr_unspecified = @has_in6_is_addr_unspecified@ +has_interim_cache = @has_interim_cache@ +has_ip_tos = @has_ip_tos@ +has_profiler = @has_profiler@ +has_so_mark = @has_so_mark@ +has_spdy = @has_spdy@ +has_tcmalloc = @has_tcmalloc@ +has_tests = @has_tests@ +has_wccp = @has_wccp@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +ifaddrsh = @ifaddrsh@ +includedir = @includedir@ +infodir = @infodir@ +ink_with_modules_local = @ink_with_modules_local@ +ink_with_modules_process = @ink_with_modules_process@ +install_sh = @install_sh@ +installbuilddir = @installbuilddir@ +iocore_include_dirs = @iocore_include_dirs@ +ip_transparent = @ip_transparent@ +jemalloch = @jemalloch@ +libdir = @libdir@ +libexecdir = @libexecdir@ +libgenh = @libgenh@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +lzmah = @lzmah@ +machine_endianh = @machine_endianh@ +malloch = @malloch@ +mandir = @mandir@ +mathh = @mathh@ +max_api_stats = @max_api_stats@ +max_event_threads = @max_event_threads@ +max_host_name_len = @max_host_name_len@ +max_threads_per_type = @max_threads_per_type@ +mkdir_p = @mkdir_p@ +net_ppp_defsh = @net_ppp_defsh@ +netdbh = @netdbh@ +netinet_in_systmh = @netinet_in_systmh@ +netinet_inh = @netinet_inh@ +netinet_ip_icmph = @netinet_ip_icmph@ +netinet_iph = @netinet_iph@ +netinet_tcph = @netinet_tcph@ +oldincludedir = @oldincludedir@ +pcre_pcreh = @pcre_pcreh@ +pcreh = @pcreh@ +pdfdir = @pdfdir@ +pkgbindir = @pkgbindir@ +pkgcachedir = @pkgcachedir@ +pkgdocdir = @pkgdocdir@ +pkglocalstatedir = @pkglocalstatedir@ +pkglogdir = @pkglogdir@ +pkgpyexecdir = @pkgpyexecdir@ +pkgpythondir = @pkgpythondir@ +pkgruntimedir = @pkgruntimedir@ +pkgsbindir = @pkgsbindir@ +pkgsysconfdir = @pkgsysconfdir@ +pkgsysgroup = @pkgsysgroup@ +pkgsysuser = @pkgsysuser@ +prefix = @prefix@ +program_transform_name = 
@program_transform_name@ +psdir = @psdir@ +pthread_nph = @pthread_nph@ +pyexecdir = @pyexecdir@ +pythondir = @pythondir@ +readline_readlineh = @readline_readlineh@ +rel_bindir = @rel_bindir@ +rel_cachedir = @rel_cachedir@ +rel_datadir = @rel_datadir@ +rel_docdir = @rel_docdir@ +rel_exec_prefix = @rel_exec_prefix@ +rel_includedir = @rel_includedir@ +rel_infodir = @rel_infodir@ +rel_installbuilddir = @rel_installbuilddir@ +rel_libdir = @rel_libdir@ +rel_libexecdir = @rel_libexecdir@ +rel_localstatedir = @rel_localstatedir@ +rel_logdir = @rel_logdir@ +rel_mandir = @rel_mandir@ +rel_prefix = @rel_prefix@ +rel_runtimedir = @rel_runtimedir@ +rel_sbindir = @rel_sbindir@ +rel_sysconfdir = @rel_sysconfdir@ +runtimedir = @runtimedir@ +sbindir = @sbindir@ +schedh = @schedh@ +sharedstatedir = @sharedstatedir@ +siginfoh = @siginfoh@ +srcdir = @srcdir@ +stroptsh = @stroptsh@ +sys_byteorderh = @sys_byteorderh@ +sys_cpuseth = @sys_cpuseth@ +sys_epollh = @sys_epollh@ +sys_eventh = @sys_eventh@ +sys_ioctlh = @sys_ioctlh@ +sys_mounth = @sys_mounth@ +sys_paramh = @sys_paramh@ +sys_pseth = @sys_pseth@ +sys_sockioh = @sys_sockioh@ +sys_sysctlh = @sys_sysctlh@ +sys_sysinfoh = @sys_sysinfoh@ +sys_sysmacrosh = @sys_sysmacrosh@ +sys_systeminfoh = @sys_systeminfoh@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +use_diags = @use_diags@ +use_epoll = @use_epoll@ +use_fast_sdk = @use_fast_sdk@ +use_freelist = @use_freelist@ +use_hwloc = @use_hwloc@ +use_kqueue = @use_kqueue@ +use_linux_native_aio = @use_linux_native_aio@ +use_port = @use_port@ +use_posix_cap = @use_posix_cap@ +use_reclaimable_freelist = @use_reclaimable_freelist@ +use_tls_alpn = @use_tls_alpn@ +use_tls_eckey = @use_tls_eckey@ +use_tls_npn = @use_tls_npn@ +use_tls_sni = @use_tls_sni@ +use_tproxy = @use_tproxy@ +valuesh = @valuesh@ +waith = @waith@ +zlibh = @zlibh@ + +# Local Macros +# 
http://www.gnu.org/software/automake/manual/automake.html#Local-Macros +ACLOCAL_AMFLAGS = -I build + +# TODO: There's still some weirdness in the dependencies between proxy +# and mgmt, hence we have to build proxy/hdrs first. + +# proxy/api/ts has to be built first, since so much of libraries and "core +# depends on the generated ts/ts.h include file. +SUBDIRS = proxy/api/ts iocore lib proxy/hdrs proxy/shared mgmt proxy cmd plugins tools example rc doc +DIST_BUILD_USER = `id -nu` +DIST_BUILD_USER_GROUP = `id -ng` +DISTCHECK_CONFIGURE_FLAGS = --with-user=${DIST_BUILD_USER} --with-group=${DIST_BUILD_USER_GROUP} +EXTRA_DIST = CHANGES INSTALL STATUS NOTICE LAYOUT LICENSE example contrib README-EC2 REVIEWERS + +# Default value when creating release candidates +RC = 0 +all: all-recursive + +.SUFFIXES: +am--refresh: Makefile + @: +$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + echo ' cd $(srcdir) && $(AUTOMAKE) --foreign'; \ + $(am__cd) $(srcdir) && $(AUTOMAKE) --foreign \ + && exit 0; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --foreign Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + echo ' $(SHELL) ./config.status'; \ + $(SHELL) ./config.status;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + $(SHELL) ./config.status --recheck + +$(top_srcdir)/configure: $(am__configure_deps) + $(am__cd) $(srcdir) && $(AUTOCONF) +$(ACLOCAL_M4): $(am__aclocal_m4_deps) + $(am__cd) $(srcdir) && $(ACLOCAL) $(ACLOCAL_AMFLAGS) +$(am__aclocal_m4_deps): +lib/perl/lib/Apache/TS.pm: $(top_builddir)/config.status $(top_srcdir)/lib/perl/lib/Apache/TS.pm.in + cd $(top_builddir) && $(SHELL) ./config.status $@ + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs + +distclean-libtool: + -rm -f libtool config.lt + +# This directory's subdirectories are mostly independent; you can cd +# into them and run 'make' without going through this Makefile. +# To change the values of 'make' variables: instead of editing Makefiles, +# (1) if the variable is set in 'config.status', edit 'config.status' +# (which will cause the Makefiles to be regenerated when you run 'make'); +# (2) otherwise, pass the desired values on the 'make' command line. 
+$(am__recursive_targets): + @fail=; \ + if $(am__make_keepgoing); then \ + failcom='fail=yes'; \ + else \ + failcom='exit 1'; \ + fi; \ + dot_seen=no; \ + target=`echo $@ | sed s/-recursive//`; \ + case "$@" in \ + distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ + *) list='$(SUBDIRS)' ;; \ + esac; \ + for subdir in $$list; do \ + echo "Making $$target in $$subdir"; \ + if test "$$subdir" = "."; then \ + dot_seen=yes; \ + local_target="$$target-am"; \ + else \ + local_target="$$target"; \ + fi; \ + ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ + || eval $$failcom; \ + done; \ + if test "$$dot_seen" = "no"; then \ + $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ + fi; test -z "$$fail" + +ID: $(am__tagged_files) + $(am__define_uniq_tagged_files); mkid -fID $$unique +tags: tags-recursive +TAGS: tags + +tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + set x; \ + here=`pwd`; \ + if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ + include_option=--etags-include; \ + empty_fix=.; \ + else \ + include_option=--include; \ + empty_fix=; \ + fi; \ + list='$(SUBDIRS)'; for subdir in $$list; do \ + if test "$$subdir" = .; then :; else \ + test ! 
-f $$subdir/TAGS || \ + set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ + fi; \ + done; \ + $(am__define_uniq_tagged_files); \ + shift; \ + if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + if test $$# -gt 0; then \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + "$$@" $$unique; \ + else \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$unique; \ + fi; \ + fi +ctags: ctags-recursive + +CTAGS: ctags +ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + $(am__define_uniq_tagged_files); \ + test -z "$(CTAGS_ARGS)$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && $(am__cd) $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) "$$here" +cscope: cscope.files + test ! -s cscope.files \ + || $(CSCOPE) -b -q $(AM_CSCOPEFLAGS) $(CSCOPEFLAGS) -i cscope.files $(CSCOPE_ARGS) +clean-cscope: + -rm -f cscope.files +cscope.files: clean-cscope cscopelist +cscopelist: cscopelist-recursive + +cscopelist-am: $(am__tagged_files) + list='$(am__tagged_files)'; \ + case "$(srcdir)" in \ + [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ + *) sdir=$(subdir)/$(srcdir) ;; \ + esac; \ + for i in $$list; do \ + if test -f "$$i"; then \ + echo "$(subdir)/$$i"; \ + else \ + echo "$$sdir/$$i"; \ + fi; \ + done >> $(top_builddir)/cscope.files + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags + -rm -f cscope.out cscope.in.out cscope.po.out cscope.files + +distdir: $(DISTFILES) + $(am__remove_distdir) + test -d "$(distdir)" || mkdir "$(distdir)" + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed 
'/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done + @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ + if test "$$subdir" = .; then :; else \ + $(am__make_dryrun) \ + || test -d "$(distdir)/$$subdir" \ + || $(MKDIR_P) "$(distdir)/$$subdir" \ + || exit 1; \ + dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ + $(am__relativize); \ + new_distdir=$$reldir; \ + dir1=$$subdir; dir2="$(top_distdir)"; \ + $(am__relativize); \ + new_top_distdir=$$reldir; \ + echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ + echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ + ($(am__cd) $$subdir && \ + $(MAKE) $(AM_MAKEFLAGS) \ + top_distdir="$$new_top_distdir" \ + distdir="$$new_distdir" \ + am__remove_distdir=: \ + am__skip_length_check=: \ + am__skip_mode_fix=: \ + distdir) \ + || exit 1; \ + fi; \ + done + -test -n "$(am__skip_mode_fix)" \ + || find "$(distdir)" -type d ! -perm -755 \ + -exec chmod u+rwx,go+rx {} \; -o \ + ! -type d ! -perm -444 -links 1 -exec chmod a+r {} \; -o \ + ! -type d ! -perm -400 -exec chmod a+r {} \; -o \ + ! -type d ! 
-perm -444 -exec $(install_sh) -c -m a+r {} {} \; \ + || chmod -R a+r "$(distdir)" +dist-gzip: distdir + tardir=$(distdir) && $(am__tar) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).tar.gz + $(am__post_remove_distdir) + +dist-bzip2: distdir + tardir=$(distdir) && $(am__tar) | BZIP2=$${BZIP2--9} bzip2 -c >$(distdir).tar.bz2 + $(am__post_remove_distdir) + +dist-lzip: distdir + tardir=$(distdir) && $(am__tar) | lzip -c $${LZIP_OPT--9} >$(distdir).tar.lz + $(am__post_remove_distdir) + +dist-xz: distdir + tardir=$(distdir) && $(am__tar) | XZ_OPT=$${XZ_OPT--e} xz -c >$(distdir).tar.xz + $(am__post_remove_distdir) + +dist-tarZ: distdir + tardir=$(distdir) && $(am__tar) | compress -c >$(distdir).tar.Z + $(am__post_remove_distdir) + +dist-shar: distdir + shar $(distdir) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).shar.gz + $(am__post_remove_distdir) + +dist-zip: distdir + -rm -f $(distdir).zip + zip -rq $(distdir).zip $(distdir) + $(am__post_remove_distdir) + +dist dist-all: + $(MAKE) $(AM_MAKEFLAGS) $(DIST_TARGETS) am__post_remove_distdir='@:' + $(am__post_remove_distdir) + +# This target untars the dist file and tries a VPATH configuration. Then +# it guarantees that the distribution is self-contained by making another +# tarfile. 
+distcheck: dist + case '$(DIST_ARCHIVES)' in \ + *.tar.gz*) \ + GZIP=$(GZIP_ENV) gzip -dc $(distdir).tar.gz | $(am__untar) ;;\ + *.tar.bz2*) \ + bzip2 -dc $(distdir).tar.bz2 | $(am__untar) ;;\ + *.tar.lz*) \ + lzip -dc $(distdir).tar.lz | $(am__untar) ;;\ + *.tar.xz*) \ + xz -dc $(distdir).tar.xz | $(am__untar) ;;\ + *.tar.Z*) \ + uncompress -c $(distdir).tar.Z | $(am__untar) ;;\ + *.shar.gz*) \ + GZIP=$(GZIP_ENV) gzip -dc $(distdir).shar.gz | unshar ;;\ + *.zip*) \ + unzip $(distdir).zip ;;\ + esac + chmod -R a-w $(distdir) + chmod u+w $(distdir) + mkdir $(distdir)/_build $(distdir)/_inst + chmod a-w $(distdir) + test -d $(distdir)/_build || exit 0; \ + dc_install_base=`$(am__cd) $(distdir)/_inst && pwd | sed -e 's,^[^:\\/]:[\\/],/,'` \ + && dc_destdir="$${TMPDIR-/tmp}/am-dc-$$$$/" \ + && am__cwd=`pwd` \ + && $(am__cd) $(distdir)/_build \ + && ../configure --srcdir=.. --prefix="$$dc_install_base" \ + $(AM_DISTCHECK_CONFIGURE_FLAGS) \ + $(DISTCHECK_CONFIGURE_FLAGS) \ + && $(MAKE) $(AM_MAKEFLAGS) \ + && $(MAKE) $(AM_MAKEFLAGS) dvi \ + && $(MAKE) $(AM_MAKEFLAGS) check \ + && $(MAKE) $(AM_MAKEFLAGS) install \ + && $(MAKE) $(AM_MAKEFLAGS) installcheck \ + && $(MAKE) $(AM_MAKEFLAGS) uninstall \ + && $(MAKE) $(AM_MAKEFLAGS) distuninstallcheck_dir="$$dc_install_base" \ + distuninstallcheck \ + && chmod -R a-w "$$dc_install_base" \ + && ({ \ + (cd ../.. 
&& umask 077 && mkdir "$$dc_destdir") \ + && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" install \ + && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" uninstall \ + && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" \ + distuninstallcheck_dir="$$dc_destdir" distuninstallcheck; \ + } || { rm -rf "$$dc_destdir"; exit 1; }) \ + && rm -rf "$$dc_destdir" \ + && $(MAKE) $(AM_MAKEFLAGS) dist \ + && rm -rf $(DIST_ARCHIVES) \ + && $(MAKE) $(AM_MAKEFLAGS) distcleancheck \ + && cd "$$am__cwd" \ + || exit 1 + $(am__post_remove_distdir) + @(echo "$(distdir) archives ready for distribution: "; \ + list='$(DIST_ARCHIVES)'; for i in $$list; do echo $$i; done) | \ + sed -e 1h -e 1s/./=/g -e 1p -e 1x -e '$$p' -e '$$x' +distuninstallcheck: + @test -n '$(distuninstallcheck_dir)' || { \ + echo 'ERROR: trying to run $@ with an empty' \ + '$$(distuninstallcheck_dir)' >&2; \ + exit 1; \ + }; \ + $(am__cd) '$(distuninstallcheck_dir)' || { \ + echo 'ERROR: cannot chdir into $(distuninstallcheck_dir)' >&2; \ + exit 1; \ + }; \ + test `$(am__distuninstallcheck_listfiles) | wc -l` -eq 0 \ + || { echo "ERROR: files left after uninstall:" ; \ + if test -n "$(DESTDIR)"; then \ + echo " (check DESTDIR support)"; \ + fi ; \ + $(distuninstallcheck_listfiles) ; \ + exit 1; } >&2 +distcleancheck: distclean + @if test '$(srcdir)' = . 
; then \ + echo "ERROR: distcleancheck can only run from a VPATH build" ; \ + exit 1 ; \ + fi + @test `$(distcleancheck_listfiles) | wc -l` -eq 0 \ + || { echo "ERROR: files left in build directory after distclean:" ; \ + $(distcleancheck_listfiles) ; \ + exit 1; } >&2 +check-am: all-am +check: check-recursive +all-am: Makefile +installdirs: installdirs-recursive +installdirs-am: +install: install-recursive +install-exec: install-exec-recursive +install-data: install-data-recursive +uninstall: uninstall-recursive + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-recursive +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+clean: clean-recursive + +clean-am: clean-generic clean-libtool mostlyclean-am + +distclean: distclean-recursive + -rm -f $(am__CONFIG_DISTCLEAN_FILES) + -rm -f Makefile +distclean-am: clean-am distclean-generic distclean-libtool \ + distclean-local distclean-tags + +dvi: dvi-recursive + +dvi-am: + +html: html-recursive + +html-am: + +info: info-recursive + +info-am: + +install-data-am: + @$(NORMAL_INSTALL) + $(MAKE) $(AM_MAKEFLAGS) install-data-hook +install-dvi: install-dvi-recursive + +install-dvi-am: + +install-exec-am: + +install-html: install-html-recursive + +install-html-am: + +install-info: install-info-recursive + +install-info-am: + +install-man: + +install-pdf: install-pdf-recursive + +install-pdf-am: + +install-ps: install-ps-recursive + +install-ps-am: + +installcheck-am: installcheck-local + +maintainer-clean: maintainer-clean-recursive + -rm -f $(am__CONFIG_DISTCLEAN_FILES) + -rm -rf $(top_srcdir)/autom4te.cache + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-recursive + +mostlyclean-am: mostlyclean-generic mostlyclean-libtool + +pdf: pdf-recursive + +pdf-am: + +ps: ps-recursive + +ps-am: + +uninstall-am: + +.MAKE: $(am__recursive_targets) install-am install-data-am \ + install-strip + +.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am \ + am--refresh check check-am clean clean-cscope clean-generic \ + clean-libtool cscope cscopelist-am ctags ctags-am dist \ + dist-all dist-bzip2 dist-gzip dist-lzip dist-shar dist-tarZ \ + dist-xz dist-zip distcheck distclean distclean-generic \ + distclean-libtool distclean-local distclean-tags \ + distcleancheck distdir distuninstallcheck dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-data-hook install-dvi install-dvi-am \ + install-exec install-exec-am install-html install-html-am \ + install-info install-info-am install-man install-pdf \ + install-pdf-am install-ps install-ps-am 
install-strip \ + installcheck installcheck-am installcheck-local installdirs \ + installdirs-am maintainer-clean maintainer-clean-generic \ + mostlyclean mostlyclean-generic mostlyclean-libtool pdf pdf-am \ + ps ps-am tags tags-am uninstall uninstall-am + + +export CCACHE_BASEDIR + +# igalic can't remember if this is make check or make test and neither should you. +test: check + +installcheck-local: + $(DESTDIR)$(bindir)/traffic_server -R 1 + +distclean-local: + -rm -f config.nice + +doxygen: + @cd doc && $(MAKE) $(AM_MAKEFLAGS) $@ + +asf-dist: asf-distdir + tardir=$(distdir) && $(am__tar) --mtime=./configure.ac | bzip2 -9 -c >$(distdir).tar.bz2 + @$(am__remove_distdir) + +asf-dist-rc: asf-distdir + tardir=$(distdir) && $(am__tar) --mtime=./configure.ac | bzip2 -9 -c >$(distdir)-rc$(RC).tar.bz2 + @$(am__remove_distdir) + +asf-distdir: + @$(am__remove_distdir) + test -d .git && git clone . $(distdir) + cd $(distdir) && git submodule update --init && autoreconf -i + rm -rf -- $(distdir)/autom4te.cache $(distdir)/.git $(distdir)/.gitignore $(distdir)/ci + +asf-dist-sign: asf-dist + md5sum -b $(distdir).tar.bz2 >$(distdir).tar.bz2.md5 + sha1sum -b $(distdir).tar.bz2 >$(distdir).tar.bz2.sha1 + gpg --armor --output $(distdir).tar.bz2.asc --detach-sig $(distdir).tar.bz2 + +asf-dist-sign-rc: asf-dist-rc + md5sum -b $(distdir)-rc$(RC).tar.bz2 >$(distdir)-rc$(RC).tar.bz2.md5 + sha1sum -b $(distdir)-rc$(RC).tar.bz2 >$(distdir)-rc$(RC).tar.bz2.sha1 + gpg --armor --output $(distdir)-rc$(RC).tar.bz2.asc --detach-sig $(distdir)-rc$(RC).tar.bz2 + +release: asf-dist-sign + git tag -fs -m "Release $(VERSION)" $(VERSION) + +rel-candidate: asf-dist-sign-rc + git tag -fs -m "Release Candidate $(VERSION)-rc$(RC)" $(VERSION)-rc$(RC) + +examples: all + @cd example && $(MAKE) $(AM_MAKEFLAGS) + +install-examples: examples + @cd example && $(MAKE) $(AM_MAKEFLAGS) install pkglibdir=$(pkglibexecdir) + +install-data-hook: + @cd doc && $(MAKE) $(AM_MAKEFLAGS) install-man + +rat: + java -jar 
$(top_srcdir)/ci/apache-rat-0.11-SNAPSHOT.jar -E $(top_srcdir)/ci/rat-regex.txt -d $(top_srcdir) + +submodules: + git submodule update --init + +help: + @echo 'all default target for building the package' && \ + echo 'check run the test suite, if any' && \ + echo 'clean remove whatever make created' && \ + echo 'distclean remove whatever configure created' && \ + echo 'dist DEPRECATED: recreate source package' && \ + echo 'examples make examples' && \ + echo 'asf-dist recreate source package' && \ + echo 'asf-dist-sign recreate source package, with checksums and signature' && \ + echo 'release recreate a signed release source package and a signed git tag' && \ + echo 'rel-candidate recreate a signed relelease candidate source package and a signed git tag' && \ + echo 'distcheck verify dist by performing VPATH build and then distclean' && \ + echo 'rat produce a RAT licence compliance report of the source' && \ + echo 'submodules update the git submodules' && \ + echo 'doxygen generate doxygen docs in doc/html dir' && \ + echo 'help display this list of make targets' && \ + echo 'install install by copying the built files to system-wide dirs' && \ + echo 'install-strip same as install but then strips debugging symbols' && \ + echo 'install-examples install examples by copying the built files to system-wide dirs' + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/NOTICE b/NOTICE new file mode 100644 index 00000000..3083af3a --- /dev/null +++ b/NOTICE @@ -0,0 +1,63 @@ +Apache Traffic Server +Copyright 2014 The Apache Software Foundation + +This product includes software developed at + - The Apache Software Foundation (http://www.apache.org/). + - Yahoo! Inc + - Network Geographics (http://network-geographics.com) + - OmniTI + - Comcast + - LinkedIn + - Mike Pall + +~~~ + +Codebase originally donated by Yahoo: + +Copyright (C) 2009 Yahoo! Inc. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +~~~ + +lib/wccp and lib/tsconfig developed by Network Geographics. +Copyright (C) 2010 Network Geographics, Inc. + +~~~ + +WebSocket support developed by LinkedIn +Copyright (C) 2014 LinkedIn + +~~~ + +Cacheurl Plugin developed by OmniTI on behalf of Google. +Copyright (C) 2011 Google Inc. + +~~~ + +stale_while_revalidate Plugin developed by OmniTI on behalf of Oregon Health & Science University +Copyright (C) 2012 Oregon Health & Science University + +~~~ + +healthcheck Plugin developed by GoDaddy. +Copyright (C) 2012 GoDaddy. + +~~~ + +lib/atscppapi developed by LinkedIn +Copyright (c) 2013 LinkedIn + +~~~ + +remap_stats and url_sig plugins developed by Comcast. +Copyright (C) 2014 Comcast + +~~~ + +LuaJIT -- a Just-In-Time Compiler for Lua. http://luajit.org/ +Copyright (C) 2005-2014 Mike Pall. All rights reserved. diff --git a/README b/README new file mode 100644 index 00000000..363e7f47 --- /dev/null +++ b/README @@ -0,0 +1,283 @@ +Apache Traffic Server + +Traffic Server is a high-performance building block for cloud services. +It's more than just a caching proxy server; it also has support for +plugins to build large scale web applications. + +1. DIRECTORY STRUCTURE + + traffic/ ............... top src dir + |-- ci/ ................ quality assurance and other CI tools and configs + |-- cmd/ ............... various command applications + |-- traffic_cop/ ... traffic_cop application + |-- traffic_line/ .. Command line utilities and API + |-- traffic_shell/ . traffic_shell application + |-- traffic_top/ ... traffic_top application + |-- contrib ............ various contributed auxiliary pieces + |-- doc/ ............... + |-- admin/ ......... Admin guide documentations + |-- dot/ ........... 
Graphviz source files for docs pictures + |-- man/ ........... Unix style man-pages + |-- plugins/ ....... Documentation for our core plugins + |-- sdk/ ........... Plugin developer guide + |-- example/ ........... example plugins + |-- install/ ........... installation programs and scripts + |-- iocore/ ............ + |-- aio/ ........... Asynchronous I/O core + |-- cache/ ......... Disk and RAM cache + |-- cluster/ ....... Cache and configuration clustering + |-- dns/ ........... DNS (asynchronous) + |-- hostdb/ ........ Internal DNS cache + |-- net/ ........... Network + |-- lib/ ............... + |-- cpp11api ....... C++ '11 wrapper for plugin developers + |-- records/ ....... library for config files + |-- perl/ .......... Perl libraries for e.g. mgmt access and configurations + |-- ts/ ............ Base / core library + |-- tsconfig/....... New config parser and library (experimental) + |-- wccp/ .......... WCCP implementation + |-- m4/ ................ custom macros for configure.ac + |-- mgmt/ .............. Management server and tools (including traffic_manager) + |-- plugins/ ........... Core plugins (stable and experimental) + |-- proxy/ ............. HTTP proxy logic + |-- api/ ........... Plugin API includes + |-- config/ ........ Configurations + |-- congest/ ....... Congestion control + |-- hdrs/ .......... Headers parsing and management + |-- http/ .......... The actual HTTP protocol implementation + |-- logging/ ....... Flexible logging + |-- README ............. intro, links, build info + |-- README-EC2 ......... info on EC2 support + |-- REVIEWERS .......... (incomplete) list of areas with committer interest + |-- LICENSE ............ full license text + `-- NOTICE ............. copyright notices + +2. REQUIREMENTS + + This section outlines build requirements for different OS + distributions. This may be out of date compared to the on-line + requirements at + + . 
+ + Fedora 12-19 / CentOS / RHEL: + autoconf + automake + pkgconfig + libtool + perl-ExtUtils-MakeMaker + gcc-c++ + glibc-devel + openssl-devel + tcl-devel + expat-devel + pcre + pcre-devel + libcap-devel (optional, highly recommended) + flex (optional, needed for e.g. WCCP) + hwloc-devel (optional, highly recommended) + lua-devel (optional) + + Fedora 8 EC2 (Kernel: 2.6.21.7-2.ec2.v1.2.fc8xen): + FC8 on EC2 requires unique configure declarations, please see that section + below. + + NOTE: Alternately you may use the install scripts in 'contrib' which + automate this or use the pre-built public Fedora Core 8 EC2 AMI + with ATS installed. Both of which are HIGHLY RECOMMENDED. + See 'README-EC2' for further details. + + autoconf + automake + pkgconfig + libtool + perl-ExtUtils-MakeMaker + gcc-c++ + glibc-devel + openssl-devel + tcl-devel + expat-devel + pcre + + Ubuntu 8.* - 13.*: + autoconf + automake + pkg-config + libtool + libmodule-install-perl + g++ + libssl-dev + tcl-dev + expat + libexpat-dev + libpcre3-dev + libcap-dev (optional, highly recommended) + flex (optional, required for e.g. WCCP) + libhwloc-dev (optional, highly recommended) + liblua5.1-dev or libluajit-5.1-dev (optional) + libncurses5-dev (optional, required for e.g.: tstop) + libcurl4-openssl-dev (optional, required for e.g.: tstop) + + Ubuntu EC2 9.10: + NOTE: Alternately you may use the install scripts in 'contrib' which + automate this or use the pre-built public Ubuntu EC2 AMI with + ATS installed. Both of which are HIGHLY RECOMMENDED. + See 'README-EC2' for further details. 
+ + Install these packages: + autoconf + automake + pkg-config + libtool + libmodule-install-perl + g++ + libssl-dev + tcl-dev + expat + libexpat-dev + libpcre3-dev + flex (optional) + libhwloc-dev (optional, highly recommended) + liblua-dev (optional) + libncurses5-dev (optional, required for e.g.: tstop) + libcurl3-openssl-dev (optional, required for e.g.: tstop) + + FreeBSD 7.2,8.0,9.0,9.1 + GCC 4.2.1 compiler suite or clang. You can install the following + packages either from ports or via pkg_add or pkgng: + devel/gmake + devel/autoconf + devel/automake + devel/pkgconf + devel/libtool + lang/tcl85 + textproc/expat (or expat2) + security/openssl + devel/pcre + textproc/flex (optional, install newer version from ports, fix PATH) + devel/hwloc (optional, highly recommended) + lang/lua (optional, on x64 don't use LuaJIT!) + + OmniOS: + developer/gcc46 + developer/build/gnu-make + developer/build/autoconf + developer/build/automake-111 + developer/build/libtool + omniti/runtime/tcl-8 + library/expat + library/security/openssl + library/pcre + + OSX 10.5: + GCC 4.2.4 compiler suite (ports collection) + autoconf + automake + pkgconfig + libtool + tcl + expat + openssl + pcre + +3. Building from distribution + +You can download the latest source code from the official Apache Traffic +Server site: + + http://trafficserver.apache.org/downloads + +(or via the URL shortener: http://s.apache.org/uG). Once downloaded, +follow the instructions: + + tar xf trafficserver-2.1.8-unstable.tar.bz2 + cd trafficserver-2.1.8-unstable + ./configure # configure the build environment to create Makefiles + make # execute the compile + +This will build with a destination prefix of /usr/local. You can finish +the installation with + + sudo make install + +4. BUILDING FROM GIT REPO + + mkdir -p ~/dev # make yourself a development dir + cd ~/dev # enter your development dir + git clone ... 
# get the source code from ASF Git repo + cd trafficserver # enter the checkout dir + autoreconf -if # generate the configure script and Makefile.in files + ./configure # configure the build environment to create Makefiles + make # execute the compile + +4b. Instructions for building on EC2 + NOTE: Alternately you may use the scripts under 'contrib' which will + automate the install for trafficserver under EC2 which is + HIGHLY RECOMMENDED. See 'README-EC2' for further details. + + # As root do the following when using Ubuntu + mkdir -p /mnt #EC2 Storage Mount, where storage is located + cd /mnt + git clone ... # get the source code from ASF Git repo + cd trafficserver # enter the checkout dir + autoreconf -i --force # generate the configure script and Makefile.in files + ./configure + make + + # As root do the following when using Fedora Core 8 kernel + mkdir -p /mnt #EC2 Storage Mount, where storage is located + cd /mnt + git clone ... # get the source code from ASF Git repo + cd trafficserver # enter the checkout dir + autoreconf -i --force # generate the configure script and Makefile.in files + ./configure --disable-eventfd + make + +4c. Instructions for building on FreeBSD + + The only difference is how to run configure and make: + + MAKE=gmake ./configure # make sure that gmake is the make we use + gmake + +5. INSTALLATION + + DEFAULT DIR CONTENTS + /usr/local/var/log/trafficserver log files created at runtime + /usr/local/var/trafficserver runtime files + /usr/local/etc/trafficserver configuration files + /usr/local/bin executable binaries + /usr/local/libexec/trafficserver plugins + +6. CRYPTO NOTICE + + This distribution includes cryptographic software. The country in + which you currently reside may have restrictions on the import, + possession, use, and/or re-export to another country, of + encryption software. 
BEFORE using any encryption software, please + check your country's laws, regulations and policies concerning the + import, possession, or use, and re-export of encryption software, to + see if this is permitted. See for more + information. + + The U.S. Government Department of Commerce, Bureau of Industry and + Security (BIS), has classified this software as Export Commodity + Control Number (ECCN) 5D002.C.1, which includes information security + software using or performing cryptographic functions with asymmetric + algorithms. The form and manner of this Apache Software Foundation + distribution makes it eligible for export under the License Exception + ENC Technology Software Unrestricted (TSU) exception (see the BIS + Export Administration Regulations, Section 740.13) for both object + code and source code. + + The following provides more details on the included cryptographic + software: + + The functionality of OpenSSL is + utilized in parts of the software. + +7. ADDITIONAL INFO + + Web page: http://trafficserver.apache.org/ + Wiki: http://cwiki.apache.org/confluence/display/TS/ + User mailing list: users@trafficserver.apache.org diff --git a/README-EC2 b/README-EC2 new file mode 100644 index 00000000..52c84491 --- /dev/null +++ b/README-EC2 @@ -0,0 +1,134 @@ +Apache TrafficServer AMIs for EC2 +Author: Jason Giedymin + +--TOC-- +1.0 Ubuntu Based Images +1.1 Ubuntu AMI Details +1.2 What is installed? + +2.0 Fedora Based Images +2.1 Fedora AMI Details +2.2 What is installed? + +3.0 The ATS Installation +3.1 Additional Update Functionality +3.2 User customized settings +3.3 Script Breakout + +4.0 FAQ +4.1 I use EC2 and I'm still confused!? +4.2 What is EC2? +4.3 I don't use EC2 or have an AWS account, can I still use these scripts on + my Virtual Machine or dedicated Ubuntu/Fedora Linux Box? +4.4 How do I contact you for help? +4.5 Will these AMIs be updated in the future? 
+ +------- + +1.0 Ubuntu Based Images + This is the prefered image as Ubuntu is kept up to date by Canonical. + +1.1 Ubuntu AMI Details + Name: Amuxbit-Karmic-ApacheTrafficServer-AMI + x86_64 AMI: ami-ada24cc4, kernel: aki-fd15f694, Ramdisk: ari-7b739e12 + i386 AMI: ami-5ba34d32, kernel: aki-5f15f636, Ramdisk: ari-d5709dbc + +1.2 What is installed? + All packages other than Apache TrafficServer are from the ec2 ubuntu + package repositories. All software installed should be open source. + * Official Canonical Ubuntu Karmic 9.10 EC2 AMI + * Dist Upgraded as of 4/1/2010 + * Apache TrafficServer - Trunk (can be updated at any time) + * openjdk-6-jre (for ec2 tools) + * ruby (for ec2 tools) + * libopenssl-ruby (for ec2 tools) + * Apache2 (httpd, used for testing) + +2.0 Fedora Based Images + +2.1 Fedora AMI Details + Name: Amuxbit-Fedora8-ApacheTrafficServer-AMI + x86_64 AMI: ami-c3a04eaa, kernel: aki-a3d737ca, Ramdisk: ari-7cb95a15 + i386 AMI: ami-53a04e3a, kernel: aki-6eaa4907, Ramdisk: ari-42b95a2b + +2.2 What is installed? + All packages other than Apache TrafficServer are from the ec2 fedora + package repositories. All software installed should be open source. + * Official Fedora Core 8 EC2 AMI + * Dist Upgraded as of 4/1/2010 + * Apache TrafficServer - Trunk (can be updated at any time) + * java-1.7.0-icedtea (openjdk for ec2 tools) + * openssl-devel (for ec2 tools) + * ruby (for ec2 tools) + * httpd (Apache web server, used for testing) + +3.0 The ATS Installation + Apache Traffic Server is installed in: '/ats'. + The installation script used to install ATS is contained within the contrib + directory, part of the ATS source distribution. The script is included + within the '/ats' folder for actual use. + +3.1 Additional Update Functionality + The EC2 images provided contain additional scripts to maintain freshness + of the install scripts. At one time the scripts provided 'auto-heal' + functionality but that has been removed. 
Running /ats/install_update.sh + will keep the ATS installation up to date. + +3.2 User customized settings + Any customized settings will be lost if the update/install scripts are run. + Upon executing the install/update scripts you may copy your files back into + the config directory. Users should periodically backup your config files, + and keep abrest of any config changes as new versions of ATS are released. + The latter reason being why I did not auto-copy configs. While I've tried + to lessen as much pain for any Sys Admins, ultimately you must take charge + of these critical settings as updates will and do happen frequently to the + ATS source. + + An example script which alters settings can be found in + '/ats/contrib/set_trafficserver.sh' + This has been used on the AMIs already provided. + +3.3 Script Breakout + + ats + |-- contrib + | -- install_trafficserver.sh : installs ATS. Try not to run this directly. + | -- set_trafficserver.sh : sample config/testing/benchmark base script. + You may want to learn from/use this script. + ` -- update_scripts.sh : used to update install_trafficserver.sh, + and set_trafficserver.sh from trunk. + |-- install_update.sh : the 'easy button' script to re-install ATS. + Executes update_scripts.sh, then install_trafficserver.sh. + This is what you want to run. + `-- readme : extra information. + +4.0 FAQ + +4.1 I use EC2 and I'm still confused!? + With your EC2 account, refer to one of the AMIs listed in this doc + (Ubuntu/Fedora). Image architectures provided are i386, and x86_64. + 64bit is offered for those who need to spin up a big high powered instances. + Once running (and it may take a while) you will find yourself with a Linux + distribution with Apache Traffic Server already installed. If you'd like + to re-install ATS against trunk, run '/ats/install_update.sh'. Note that + you will have to backup and/or optimize your configuration (see 3.2). 
+ To further setup ATS see the TrafficServer wiki @ + + http://cwiki.apache.org/confluence/display/TS/ + +4.2 What is EC2? + See http://aws.amazon.com/ + +4.3 I don't use EC2 or have an AWS account, can I still use these scripts on + my Virtual Machine or dedicated Ubuntu/Fedora Linux Box? + Absolutely! The /contrib scripts are actually meant for Ubuntu/Linux + Installs first, EC2 second. You can find them on the ATS SVN/Git + repository under '/contrib'. + +4.4 How do I contact you for help? + For mailing lists, and IRC information, visit our web page at + + http://trafficserver.apache.org/ + +4.5 Will these AMIs be updated in the future? + Absolutely! diff --git a/REVIEWERS b/REVIEWERS new file mode 100644 index 00000000..15d89aca --- /dev/null +++ b/REVIEWERS @@ -0,0 +1,117 @@ +These are Committers who have expressed general interest in the listed parts +of the Traffic Server code to the extent that they will review or at least read +over changes in those parts and should review major changes. 
+ +All changes should have a jira ticket: + https://issues.apache.org/jira/browse/TS + +Big changes should be discussed on one or more of: + mailing list: dev@trafficserver.apache.org + IRC: #traffic-server irc.freenode.net + +Major or controversial changes should be have wiki page and may require a vote: + http://cwiki.apache.org/confluence/display/TS/Traffic+Server + +Committers: add modules as needed and any qualifications after your e-mail address +==================================================================================== + +all/general interest + jplevyak@apache.org + bcall@apache.org + briang@apache.org + mturk@apache.org + zwoop@apache.org + jim@apache.org + ericb@apache.org + manjesh@apache.org +lib/ts + jplevyak@apache.org + zwoop@apache.org + bcall@apache.org + georgep@apache.org + ericb@apache.org +lib/records + bcall@apache.org + georgep@apache.org + zwoop@apache.org +Event System/Buffering/VIO/VConnection + jplevyak@apache.org + bcall@apache.org + briang@apache.org + georgep@apache.org +Network I/O + jplevyak@apache.org - not including SSL or UDP except where it intersects the other code + bcall@apache.org + georgep@apache.org + briang@apache.org + jim@apache.org + zwoop@apache.org +Raw Cache and AIO + jplevyak@apache.org + bcall@apache.org + georgep@apache.org +HTTP Caching + bcall@apache.org + jim@apache.org + zwoop@apache.org +Block-Cache +Clustering + jplevyak@apache.org + georgep@apache.org + jim@apache.org + zym@apache.org +DNS/HostDB + jplevyak@apache.org + zwoop@apache.org + bcall@apache.org + georgep@apache.org + ericb@apache.org +FastIO +Docs + dianes@apache.org +Examples/Plugins + zwoop@apache.org + bcall@apache.org + briang@apache.org + ericb@apache.org +HDRs + bcall@apache.org + zwoop@apache.org + briang@apache.org +HTTP + bcall@apache.org + zwoop@apache.org + briang@apache.org + jim@apache.org + ericb@apache.org +Remap + zwoop@apache.org + bcall@apache.org + jim@apache.org + ericb@apache.org +API + zwoop@apache.org + 
briang@apache.org + bcall@apache.org + georgep@apache.org + ericb@apache.org +Config + zwoop@apache.org + bcall@apache.org + georgep@apache.org +Congest +Logging + georgep@apache.org +MGMT + georgep@apache.org +Stats + bcall@apache.org + georgep@apache.org + briang@apache.org + zwoop@apache.org +Build System + zwoop@apache.org + bcall@apache.org + georgep@apache.org + mturk@apache.org + jim@apache.org diff --git a/STATUS b/STATUS new file mode 100644 index 00000000..aaad22e9 --- /dev/null +++ b/STATUS @@ -0,0 +1,65 @@ +Traffic Server 4.2 STATUS: -*-text-*- +Last modified at [$Date$] + +The current version of this file can be found at: + + * http://git-wip-us.apache.org/repos/asf?p=trafficserver.git;a=blob;f=STATUS;hb=master + + +Release history: + + 4.1.2 : Released on Dec 17th, 2013 + + 4.0.2 : Released on Oct 14th, 2013 + 4.0.1 : Released on Aug 30th, 2013 + + 3.3.5 : Released on Aug 16th, 2013 + 3.3.4 : Released on June 10th, 2013 + 3.3.2 : Released on April 18th, 2013 + 3.3.1 : Released on March 12th, 2013 + 3.3.0 : Released on September 7th, 2012 + + 3.2.4 : Released on January 20, 2013 + 3.2.0 : Released on June 20, 2012 + 3.0.5 : Released on June 06, 2012 + 3.0.4 : Released on March 21, 2012 + 3.0.3 : Released on February 13, 201 + 3.0.2 : Released on December 07, 2011 + 3.0.1 : Released on July 19, 2011 + 3.0.0 : Released on June 14, 2011 + 2.0.1 : Released on August 31, 2010 + 2.0.0 : Released on April 28, 2010 + + +Contributors looking for a mission: + + * Just do an egrep on "TODO" or "XXX" in the source. + + * Review the bug database at: https://issues.apache.org/jira/browse/TS + + - Download, test and review patches attached to open bugs + + * Open bugs in the bug database. 
+ + +UPCOMING WORK: + +A list of all bugs open for the next release can be found at + + http://s.apache.org/ats-next + + +RELEASE PROCESS + +The Traffic Server release process and support cycles are documented at + + https://cwiki.apache.org/confluence/display/TS/ReleaseProcess + +and + + https://cwiki.apache.org/confluence/display/TS/New+Release+Processes + + +Current status of all build systems is available at + + http://ci.trafficserver.apache.org/ diff --git a/Vagrantfile b/Vagrantfile new file mode 100644 index 00000000..594ccf99 --- /dev/null +++ b/Vagrantfile @@ -0,0 +1,142 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +$network = { + # The VM host is 192.168.100.1 + "raring64" => "192.168.100.11", + "quantal64" => "192.168.100.12", + "precise64" => "192.168.100.13", + "lucid64" => "192.168.100.14", + "centos63" => "192.168.100.15", + "freebsd" => "192.168.100.16", + "omnios" => "192.168.100.17", + + "raring32" => "192.168.200.11", + "quantal32" => "192.168.200.12", + "precise32" => "192.168.200.13", + "lucid32" => "192.168.200.14", + + "fedora18" => "192.168.200.15", + "centos59" => "192.168.200.16", + "centos64" => "192.168.200.17", + "debian7" => "192.168.200.18", + "sles11" => "192.168.200.19", + "oel63" => "192.168.200.20", + + "saucy64" => "192.168.100.21", + "saucy32" => "192.168.100.22", + "trusty64" => "192.168.100.23", + "trusty32" => "192.168.100.24", +} + +$vmspec = { + "lucid64" => [ # Ubuntu 10.04 LTS (Lucid Lynx) + "http://files.vagrantup.com/lucid64.box", "debian.pp" + ], + "fedora18" => [ + "http://puppet-vagrant-boxes.puppetlabs.com/fedora-18-x64-vbox4210.box", "redhat.pp" + ], + "centos63" => [ + "https://dl.dropbox.com/u/7225008/Vagrant/CentOS-6.3-x86_64-minimal.box", "redhat.pp" + ], + "centos59" => [ + "http://puppet-vagrant-boxes.puppetlabs.com/centos-59-x64-vbox4210.box", "redhat.pp", + ], + "centos64" => [ + "http://puppet-vagrant-boxes.puppetlabs.com/centos-64-x64-vbox4210.box", "redhat.pp", + ], + "debian7" => [ + "http://puppet-vagrant-boxes.puppetlabs.com/debian-70rc1-x64-vbox4210.box", "debian.pp", + ], + "sles11" => [ + "http://puppet-vagrant-boxes.puppetlabs.com/sles-11sp1-x64-vbox4210.box", "redhat.pp", + ], + "oel63" => [ + "http://ats.boot.org/vagrant/vagrant-oel63-x64.box", "redhat.pp", + ], +} + +Vagrant.configure("2") do |config| + + # Default all VMs to 1GB. + config.vm.provider :virtualbox do |v| + v.customize ["modifyvm", :id, "--memory", 1024] + end + + # Mount the Traffic Server source code in a fixed location everywhere. Use NFS + # because it's faster and vboxfs doesn't support links. 
+ config.vm.synced_folder ".", "/opt/src/trafficserver.git", :nfs => true + + # Always forward SSH keys to VMs. + config.ssh.forward_agent = true + + # Ubuntu 14.04 (Trusty Tahr) + # Ubuntu 13.04 (Raring Ringtail) + # Ubuntu 12.10 (Quantal Quetzal) + # Ubuntu 12.04 LTS (Precise Pangolin) + ["i386", "amd64"].each { |arch| + ['saucy', 'raring', 'quantal', 'precise', 'trusty' ].each { |release| + n = { "i386" => "32", "amd64" => "64" }[arch] + config.vm.define "#{release}#{n}" do | config | + config.vm.box = "#{release}#{n}" + config.vm.box_url = "http://cloud-images.ubuntu.com/vagrant/#{release}/current/#{release}-server-cloudimg-#{arch}-vagrant-disk1.box" + config.vm.network :private_network, ip: $network["#{release}#{n}"] + config.vm.provision :puppet do |puppet| + puppet.manifests_path = "contrib/manifests" + puppet.manifest_file = "debian.pp" + end + end + } + } + + config.vm.define :freebsd do | config | + config.vm.box = "freebsd" + config.vm.synced_folder ".", "/opt/src/trafficserver.git", :nfs => false + # Force the FreeBSD VM to use a network driver that actually works. 
+ config.vm.provider :virtualbox do |v| + v.customize ["modifyvm", :id, "--nictype1", "82543GC"] + v.customize ["modifyvm", :id, "--nictype2", "82543GC"] + end + config.vm.network :private_network, ip: $network["freebsd"] + config.vm.box_url = "https://github.com/downloads/xironix/freebsd-vagrant/freebsd_amd64_zfs.box" + end + + # Current OmniOS release, see http://omnios.omniti.com/wiki.php/Installation + config.vm.define :omnios do | config | + config.vm.box = "omnios" + config.vm.guest = :solaris + config.vm.network :private_network, ip: $network["omnios"] + config.vm.synced_folder ".", "/opt/src/trafficserver.git", :nfs => false + config.vm.box_url = "http://omnios.omniti.com/media/omnios-latest.box" + config.vm.provision :shell, :path => "contrib/manifests/omnios.sh" + end + + $vmspec.each do | name, spec | + config.vm.define name do | config | + config.vm.box = name + config.vm.box_url = spec[0] + config.vm.network :private_network, ip: $network[name] + config.vm.provision :puppet do |puppet| + puppet.manifests_path = "contrib/manifests" + puppet.manifest_file = spec[1] + end + end + end + +end + +# -*- mode: ruby -*- +# vi: set ft=ruby : diff --git a/aclocal.m4 b/aclocal.m4 new file mode 100644 index 00000000..5a3b4e15 --- /dev/null +++ b/aclocal.m4 @@ -0,0 +1,1428 @@ +# generated automatically by aclocal 1.13.4 -*- Autoconf -*- + +# Copyright (C) 1996-2013 Free Software Foundation, Inc. + +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. 
+ +m4_ifndef([AC_CONFIG_MACRO_DIRS], [m4_defun([_AM_CONFIG_MACRO_DIRS], [])m4_defun([AC_CONFIG_MACRO_DIRS], [_AM_CONFIG_MACRO_DIRS($@)])]) +m4_ifndef([AC_AUTOCONF_VERSION], + [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl +m4_if(m4_defn([AC_AUTOCONF_VERSION]), [2.69],, +[m4_warning([this file was generated for autoconf 2.69. +You have another version of autoconf. It may work, but is not guaranteed to. +If you have problems, you may need to regenerate the build system entirely. +To do so, use the procedure documented by the package, typically 'autoreconf'.])]) + +# Copyright (C) 2002-2013 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# AM_AUTOMAKE_VERSION(VERSION) +# ---------------------------- +# Automake X.Y traces this macro to ensure aclocal.m4 has been +# generated from the m4 files accompanying Automake X.Y. +# (This private macro should not be called outside this file.) +AC_DEFUN([AM_AUTOMAKE_VERSION], +[am__api_version='1.13' +dnl Some users find AM_AUTOMAKE_VERSION and mistake it for a way to +dnl require some minimum version. Point them to the right macro. +m4_if([$1], [1.13.4], [], + [AC_FATAL([Do not call $0, use AM_INIT_AUTOMAKE([$1]).])])dnl +]) + +# _AM_AUTOCONF_VERSION(VERSION) +# ----------------------------- +# aclocal traces this macro to find the Autoconf version. +# This is a private macro too. Using m4_define simplifies +# the logic in aclocal, which can simply ignore this definition. +m4_define([_AM_AUTOCONF_VERSION], []) + +# AM_SET_CURRENT_AUTOMAKE_VERSION +# ------------------------------- +# Call AM_AUTOMAKE_VERSION and AM_AUTOMAKE_VERSION so they can be traced. +# This function is AC_REQUIREd by AM_INIT_AUTOMAKE. 
+AC_DEFUN([AM_SET_CURRENT_AUTOMAKE_VERSION], +[AM_AUTOMAKE_VERSION([1.13.4])dnl +m4_ifndef([AC_AUTOCONF_VERSION], + [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl +_AM_AUTOCONF_VERSION(m4_defn([AC_AUTOCONF_VERSION]))]) + +# Copyright (C) 2011-2013 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# AM_PROG_AR([ACT-IF-FAIL]) +# ------------------------- +# Try to determine the archiver interface, and trigger the ar-lib wrapper +# if it is needed. If the detection of archiver interface fails, run +# ACT-IF-FAIL (default is to abort configure with a proper error message). +AC_DEFUN([AM_PROG_AR], +[AC_BEFORE([$0], [LT_INIT])dnl +AC_BEFORE([$0], [AC_PROG_LIBTOOL])dnl +AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl +AC_REQUIRE_AUX_FILE([ar-lib])dnl +AC_CHECK_TOOLS([AR], [ar lib "link -lib"], [false]) +: ${AR=ar} + +AC_CACHE_CHECK([the archiver ($AR) interface], [am_cv_ar_interface], + [am_cv_ar_interface=ar + AC_COMPILE_IFELSE([AC_LANG_SOURCE([[int some_variable = 0;]])], + [am_ar_try='$AR cru libconftest.a conftest.$ac_objext >&AS_MESSAGE_LOG_FD' + AC_TRY_EVAL([am_ar_try]) + if test "$ac_status" -eq 0; then + am_cv_ar_interface=ar + else + am_ar_try='$AR -NOLOGO -OUT:conftest.lib conftest.$ac_objext >&AS_MESSAGE_LOG_FD' + AC_TRY_EVAL([am_ar_try]) + if test "$ac_status" -eq 0; then + am_cv_ar_interface=lib + else + am_cv_ar_interface=unknown + fi + fi + rm -f conftest.lib libconftest.a + ]) + ]) + +case $am_cv_ar_interface in +ar) + ;; +lib) + # Microsoft lib, so override with the ar-lib wrapper script. + # FIXME: It is wrong to rewrite AR. + # But if we don't then we get into trouble of one sort or another. + # A longer-term fix would be to have automake use am__AR in this case, + # and then we could set am__AR="$am_aux_dir/ar-lib \$(AR)" or something + # similar. 
+ AR="$am_aux_dir/ar-lib $AR" + ;; +unknown) + m4_default([$1], + [AC_MSG_ERROR([could not determine $AR interface])]) + ;; +esac +AC_SUBST([AR])dnl +]) + +# Figure out how to run the assembler. -*- Autoconf -*- + +# Copyright (C) 2001-2013 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# AM_PROG_AS +# ---------- +AC_DEFUN([AM_PROG_AS], +[# By default we simply use the C compiler to build assembly code. +AC_REQUIRE([AC_PROG_CC]) +test "${CCAS+set}" = set || CCAS=$CC +test "${CCASFLAGS+set}" = set || CCASFLAGS=$CFLAGS +AC_ARG_VAR([CCAS], [assembler compiler command (defaults to CC)]) +AC_ARG_VAR([CCASFLAGS], [assembler compiler flags (defaults to CFLAGS)]) +_AM_IF_OPTION([no-dependencies],, [_AM_DEPENDENCIES([CCAS])])dnl +]) + +# AM_AUX_DIR_EXPAND -*- Autoconf -*- + +# Copyright (C) 2001-2013 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# For projects using AC_CONFIG_AUX_DIR([foo]), Autoconf sets +# $ac_aux_dir to '$srcdir/foo'. In other projects, it is set to +# '$srcdir', '$srcdir/..', or '$srcdir/../..'. +# +# Of course, Automake must honor this variable whenever it calls a +# tool from the auxiliary directory. The problem is that $srcdir (and +# therefore $ac_aux_dir as well) can be either absolute or relative, +# depending on how configure is run. This is pretty annoying, since +# it makes $ac_aux_dir quite unusable in subdirectories: in the top +# source directory, any form will work fine, but in subdirectories a +# relative path needs to be adjusted first. 
+# +# $ac_aux_dir/missing +# fails when called from a subdirectory if $ac_aux_dir is relative +# $top_srcdir/$ac_aux_dir/missing +# fails if $ac_aux_dir is absolute, +# fails when called from a subdirectory in a VPATH build with +# a relative $ac_aux_dir +# +# The reason of the latter failure is that $top_srcdir and $ac_aux_dir +# are both prefixed by $srcdir. In an in-source build this is usually +# harmless because $srcdir is '.', but things will broke when you +# start a VPATH build or use an absolute $srcdir. +# +# So we could use something similar to $top_srcdir/$ac_aux_dir/missing, +# iff we strip the leading $srcdir from $ac_aux_dir. That would be: +# am_aux_dir='\$(top_srcdir)/'`expr "$ac_aux_dir" : "$srcdir//*\(.*\)"` +# and then we would define $MISSING as +# MISSING="\${SHELL} $am_aux_dir/missing" +# This will work as long as MISSING is not called from configure, because +# unfortunately $(top_srcdir) has no meaning in configure. +# However there are other variables, like CC, which are often used in +# configure, and could therefore not use this "fixed" $ac_aux_dir. +# +# Another solution, used here, is to always expand $ac_aux_dir to an +# absolute PATH. The drawback is that using absolute paths prevent a +# configured tree to be moved without reconfiguration. + +AC_DEFUN([AM_AUX_DIR_EXPAND], +[dnl Rely on autoconf to set up CDPATH properly. +AC_PREREQ([2.50])dnl +# expand $ac_aux_dir to an absolute path +am_aux_dir=`cd $ac_aux_dir && pwd` +]) + +# AM_CONDITIONAL -*- Autoconf -*- + +# Copyright (C) 1997-2013 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# AM_CONDITIONAL(NAME, SHELL-CONDITION) +# ------------------------------------- +# Define a conditional. 
+AC_DEFUN([AM_CONDITIONAL], +[AC_PREREQ([2.52])dnl + m4_if([$1], [TRUE], [AC_FATAL([$0: invalid condition: $1])], + [$1], [FALSE], [AC_FATAL([$0: invalid condition: $1])])dnl +AC_SUBST([$1_TRUE])dnl +AC_SUBST([$1_FALSE])dnl +_AM_SUBST_NOTMAKE([$1_TRUE])dnl +_AM_SUBST_NOTMAKE([$1_FALSE])dnl +m4_define([_AM_COND_VALUE_$1], [$2])dnl +if $2; then + $1_TRUE= + $1_FALSE='#' +else + $1_TRUE='#' + $1_FALSE= +fi +AC_CONFIG_COMMANDS_PRE( +[if test -z "${$1_TRUE}" && test -z "${$1_FALSE}"; then + AC_MSG_ERROR([[conditional "$1" was never defined. +Usually this means the macro was only invoked conditionally.]]) +fi])]) + +# Copyright (C) 1999-2013 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + + +# There are a few dirty hacks below to avoid letting 'AC_PROG_CC' be +# written in clear, in which case automake, when reading aclocal.m4, +# will think it sees a *use*, and therefore will trigger all it's +# C support machinery. Also note that it means that autoscan, seeing +# CC etc. in the Makefile, will ask for an AC_PROG_CC use... + + +# _AM_DEPENDENCIES(NAME) +# ---------------------- +# See how the compiler implements dependency checking. +# NAME is "CC", "CXX", "OBJC", "OBJCXX", "UPC", or "GJC". +# We try a few techniques and use that to set a single cache variable. +# +# We don't AC_REQUIRE the corresponding AC_PROG_CC since the latter was +# modified to invoke _AM_DEPENDENCIES(CC); we would have a circular +# dependency, and given that the user is not expected to run this macro, +# just rely on AC_PROG_CC. 
+AC_DEFUN([_AM_DEPENDENCIES], +[AC_REQUIRE([AM_SET_DEPDIR])dnl +AC_REQUIRE([AM_OUTPUT_DEPENDENCY_COMMANDS])dnl +AC_REQUIRE([AM_MAKE_INCLUDE])dnl +AC_REQUIRE([AM_DEP_TRACK])dnl + +m4_if([$1], [CC], [depcc="$CC" am_compiler_list=], + [$1], [CXX], [depcc="$CXX" am_compiler_list=], + [$1], [OBJC], [depcc="$OBJC" am_compiler_list='gcc3 gcc'], + [$1], [OBJCXX], [depcc="$OBJCXX" am_compiler_list='gcc3 gcc'], + [$1], [UPC], [depcc="$UPC" am_compiler_list=], + [$1], [GCJ], [depcc="$GCJ" am_compiler_list='gcc3 gcc'], + [depcc="$$1" am_compiler_list=]) + +AC_CACHE_CHECK([dependency style of $depcc], + [am_cv_$1_dependencies_compiler_type], +[if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then + # We make a subdir and do the tests there. Otherwise we can end up + # making bogus files that we don't know about and never remove. For + # instance it was reported that on HP-UX the gcc test will end up + # making a dummy file named 'D' -- because '-MD' means "put the output + # in D". + rm -rf conftest.dir + mkdir conftest.dir + # Copy depcomp to subdir because otherwise we won't find it if we're + # using a relative directory. + cp "$am_depcomp" conftest.dir + cd conftest.dir + # We will build objects and dependencies in a subdirectory because + # it helps to detect inapplicable dependency modes. For instance + # both Tru64's cc and ICC support -MD to output dependencies as a + # side effect of compilation, but ICC will put the dependencies in + # the current directory while Tru64 will put them in the object + # directory. 
+ mkdir sub + + am_cv_$1_dependencies_compiler_type=none + if test "$am_compiler_list" = ""; then + am_compiler_list=`sed -n ['s/^#*\([a-zA-Z0-9]*\))$/\1/p'] < ./depcomp` + fi + am__universal=false + m4_case([$1], [CC], + [case " $depcc " in #( + *\ -arch\ *\ -arch\ *) am__universal=true ;; + esac], + [CXX], + [case " $depcc " in #( + *\ -arch\ *\ -arch\ *) am__universal=true ;; + esac]) + + for depmode in $am_compiler_list; do + # Setup a source with many dependencies, because some compilers + # like to wrap large dependency lists on column 80 (with \), and + # we should not choose a depcomp mode which is confused by this. + # + # We need to recreate these files for each test, as the compiler may + # overwrite some of them when testing with obscure command lines. + # This happens at least with the AIX C compiler. + : > sub/conftest.c + for i in 1 2 3 4 5 6; do + echo '#include "conftst'$i'.h"' >> sub/conftest.c + # Using ": > sub/conftst$i.h" creates only sub/conftst1.h with + # Solaris 10 /bin/sh. + echo '/* dummy */' > sub/conftst$i.h + done + echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf + + # We check with '-c' and '-o' for the sake of the "dashmstdout" + # mode. It turns out that the SunPro C++ compiler does not properly + # handle '-M -o', and we need to detect this. Also, some Intel + # versions had trouble with output in subdirs. + am__obj=sub/conftest.${OBJEXT-o} + am__minus_obj="-o $am__obj" + case $depmode in + gcc) + # This depmode causes a compiler race in universal mode. + test "$am__universal" = false || continue + ;; + nosideeffect) + # After this tag, mechanisms are not by side-effect, so they'll + # only be used when explicitly requested. + if test "x$enable_dependency_tracking" = xyes; then + continue + else + break + fi + ;; + msvc7 | msvc7msys | msvisualcpp | msvcmsys) + # This compiler won't grok '-c -o', but also, the minuso test has + # not run yet. 
These depmodes are late enough in the game, and + # so weak that their functioning should not be impacted. + am__obj=conftest.${OBJEXT-o} + am__minus_obj= + ;; + none) break ;; + esac + if depmode=$depmode \ + source=sub/conftest.c object=$am__obj \ + depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \ + $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \ + >/dev/null 2>conftest.err && + grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 && + grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 && + grep $am__obj sub/conftest.Po > /dev/null 2>&1 && + ${MAKE-make} -s -f confmf > /dev/null 2>&1; then + # icc doesn't choke on unknown options, it will just issue warnings + # or remarks (even with -Werror). So we grep stderr for any message + # that says an option was ignored or not supported. + # When given -MP, icc 7.0 and 7.1 complain thusly: + # icc: Command line warning: ignoring option '-M'; no argument required + # The diagnosis changed in icc 8.0: + # icc: Command line remark: option '-MP' not supported + if (grep 'ignoring option' conftest.err || + grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else + am_cv_$1_dependencies_compiler_type=$depmode + break + fi + fi + done + + cd .. + rm -rf conftest.dir +else + am_cv_$1_dependencies_compiler_type=none +fi +]) +AC_SUBST([$1DEPMODE], [depmode=$am_cv_$1_dependencies_compiler_type]) +AM_CONDITIONAL([am__fastdep$1], [ + test "x$enable_dependency_tracking" != xno \ + && test "$am_cv_$1_dependencies_compiler_type" = gcc3]) +]) + + +# AM_SET_DEPDIR +# ------------- +# Choose a directory name for dependency files. +# This macro is AC_REQUIREd in _AM_DEPENDENCIES. 
+AC_DEFUN([AM_SET_DEPDIR], +[AC_REQUIRE([AM_SET_LEADING_DOT])dnl +AC_SUBST([DEPDIR], ["${am__leading_dot}deps"])dnl +]) + + +# AM_DEP_TRACK +# ------------ +AC_DEFUN([AM_DEP_TRACK], +[AC_ARG_ENABLE([dependency-tracking], [dnl +AS_HELP_STRING( + [--enable-dependency-tracking], + [do not reject slow dependency extractors]) +AS_HELP_STRING( + [--disable-dependency-tracking], + [speeds up one-time build])]) +if test "x$enable_dependency_tracking" != xno; then + am_depcomp="$ac_aux_dir/depcomp" + AMDEPBACKSLASH='\' + am__nodep='_no' +fi +AM_CONDITIONAL([AMDEP], [test "x$enable_dependency_tracking" != xno]) +AC_SUBST([AMDEPBACKSLASH])dnl +_AM_SUBST_NOTMAKE([AMDEPBACKSLASH])dnl +AC_SUBST([am__nodep])dnl +_AM_SUBST_NOTMAKE([am__nodep])dnl +]) + +# Generate code to set up dependency tracking. -*- Autoconf -*- + +# Copyright (C) 1999-2013 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + + +# _AM_OUTPUT_DEPENDENCY_COMMANDS +# ------------------------------ +AC_DEFUN([_AM_OUTPUT_DEPENDENCY_COMMANDS], +[{ + # Older Autoconf quotes --file arguments for eval, but not when files + # are listed without --file. Let's play safe and only enable the eval + # if we detect the quoting. + case $CONFIG_FILES in + *\'*) eval set x "$CONFIG_FILES" ;; + *) set x $CONFIG_FILES ;; + esac + shift + for mf + do + # Strip MF so we end up with the name of the file. + mf=`echo "$mf" | sed -e 's/:.*$//'` + # Check whether this is an Automake generated Makefile or not. + # We used to match only the files named 'Makefile.in', but + # some people rename them; so instead we look at the file content. + # Grep'ing the first line is not enough: some people post-process + # each Makefile.in and add a new line on top of each file to say so. 
+ # Grep'ing the whole file is not good either: AIX grep has a line + # limit of 2048, but all sed's we know have understand at least 4000. + if sed -n 's,^#.*generated by automake.*,X,p' "$mf" | grep X >/dev/null 2>&1; then + dirpart=`AS_DIRNAME("$mf")` + else + continue + fi + # Extract the definition of DEPDIR, am__include, and am__quote + # from the Makefile without running 'make'. + DEPDIR=`sed -n 's/^DEPDIR = //p' < "$mf"` + test -z "$DEPDIR" && continue + am__include=`sed -n 's/^am__include = //p' < "$mf"` + test -z "$am__include" && continue + am__quote=`sed -n 's/^am__quote = //p' < "$mf"` + # Find all dependency output files, they are included files with + # $(DEPDIR) in their names. We invoke sed twice because it is the + # simplest approach to changing $(DEPDIR) to its actual value in the + # expansion. + for file in `sed -n " + s/^$am__include $am__quote\(.*(DEPDIR).*\)$am__quote"'$/\1/p' <"$mf" | \ + sed -e 's/\$(DEPDIR)/'"$DEPDIR"'/g'`; do + # Make sure the directory exists. + test -f "$dirpart/$file" && continue + fdir=`AS_DIRNAME(["$file"])` + AS_MKDIR_P([$dirpart/$fdir]) + # echo "creating $dirpart/$file" + echo '# dummy' > "$dirpart/$file" + done + done +} +])# _AM_OUTPUT_DEPENDENCY_COMMANDS + + +# AM_OUTPUT_DEPENDENCY_COMMANDS +# ----------------------------- +# This macro should only be invoked once -- use via AC_REQUIRE. +# +# This code is only required when automatic dependency tracking +# is enabled. FIXME. This creates each '.P' file that we will +# need in order to bootstrap the dependency handling code. +AC_DEFUN([AM_OUTPUT_DEPENDENCY_COMMANDS], +[AC_CONFIG_COMMANDS([depfiles], + [test x"$AMDEP_TRUE" != x"" || _AM_OUTPUT_DEPENDENCY_COMMANDS], + [AMDEP_TRUE="$AMDEP_TRUE" ac_aux_dir="$ac_aux_dir"]) +]) + +# Do all the work for Automake. -*- Autoconf -*- + +# Copyright (C) 1996-2013 Free Software Foundation, Inc. 
+# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This macro actually does too much. Some checks are only needed if +# your package does certain things. But this isn't really a big deal. + +# AM_INIT_AUTOMAKE(PACKAGE, VERSION, [NO-DEFINE]) +# AM_INIT_AUTOMAKE([OPTIONS]) +# ----------------------------------------------- +# The call with PACKAGE and VERSION arguments is the old style +# call (pre autoconf-2.50), which is being phased out. PACKAGE +# and VERSION should now be passed to AC_INIT and removed from +# the call to AM_INIT_AUTOMAKE. +# We support both call styles for the transition. After +# the next Automake release, Autoconf can make the AC_INIT +# arguments mandatory, and then we can depend on a new Autoconf +# release and drop the old call support. +AC_DEFUN([AM_INIT_AUTOMAKE], +[AC_PREREQ([2.65])dnl +dnl Autoconf wants to disallow AM_ names. We explicitly allow +dnl the ones we care about. +m4_pattern_allow([^AM_[A-Z]+FLAGS$])dnl +AC_REQUIRE([AM_SET_CURRENT_AUTOMAKE_VERSION])dnl +AC_REQUIRE([AC_PROG_INSTALL])dnl +if test "`cd $srcdir && pwd`" != "`pwd`"; then + # Use -I$(srcdir) only when $(srcdir) != ., so that make's output + # is not polluted with repeated "-I." + AC_SUBST([am__isrc], [' -I$(srcdir)'])_AM_SUBST_NOTMAKE([am__isrc])dnl + # test to see if srcdir already configured + if test -f $srcdir/config.status; then + AC_MSG_ERROR([source directory already configured; run "make distclean" there first]) + fi +fi + +# test whether we have cygpath +if test -z "$CYGPATH_W"; then + if (cygpath --version) >/dev/null 2>/dev/null; then + CYGPATH_W='cygpath -w' + else + CYGPATH_W=echo + fi +fi +AC_SUBST([CYGPATH_W]) + +# Define the identity of the package. +dnl Distinguish between old-style and new-style calls. 
+m4_ifval([$2], +[AC_DIAGNOSE([obsolete], + [$0: two- and three-arguments forms are deprecated.]) +m4_ifval([$3], [_AM_SET_OPTION([no-define])])dnl + AC_SUBST([PACKAGE], [$1])dnl + AC_SUBST([VERSION], [$2])], +[_AM_SET_OPTIONS([$1])dnl +dnl Diagnose old-style AC_INIT with new-style AM_AUTOMAKE_INIT. +m4_if( + m4_ifdef([AC_PACKAGE_NAME], [ok]):m4_ifdef([AC_PACKAGE_VERSION], [ok]), + [ok:ok],, + [m4_fatal([AC_INIT should be called with package and version arguments])])dnl + AC_SUBST([PACKAGE], ['AC_PACKAGE_TARNAME'])dnl + AC_SUBST([VERSION], ['AC_PACKAGE_VERSION'])])dnl + +_AM_IF_OPTION([no-define],, +[AC_DEFINE_UNQUOTED([PACKAGE], ["$PACKAGE"], [Name of package]) + AC_DEFINE_UNQUOTED([VERSION], ["$VERSION"], [Version number of package])])dnl + +# Some tools Automake needs. +AC_REQUIRE([AM_SANITY_CHECK])dnl +AC_REQUIRE([AC_ARG_PROGRAM])dnl +AM_MISSING_PROG([ACLOCAL], [aclocal-${am__api_version}]) +AM_MISSING_PROG([AUTOCONF], [autoconf]) +AM_MISSING_PROG([AUTOMAKE], [automake-${am__api_version}]) +AM_MISSING_PROG([AUTOHEADER], [autoheader]) +AM_MISSING_PROG([MAKEINFO], [makeinfo]) +AC_REQUIRE([AM_PROG_INSTALL_SH])dnl +AC_REQUIRE([AM_PROG_INSTALL_STRIP])dnl +AC_REQUIRE([AC_PROG_MKDIR_P])dnl +# For better backward compatibility. To be removed once Automake 1.9.x +# dies out for good. For more background, see: +# +# +AC_SUBST([mkdir_p], ['$(MKDIR_P)']) +# We need awk for the "check" target. The system "awk" is bad on +# some platforms. 
+AC_REQUIRE([AC_PROG_AWK])dnl +AC_REQUIRE([AC_PROG_MAKE_SET])dnl +AC_REQUIRE([AM_SET_LEADING_DOT])dnl +_AM_IF_OPTION([tar-ustar], [_AM_PROG_TAR([ustar])], + [_AM_IF_OPTION([tar-pax], [_AM_PROG_TAR([pax])], + [_AM_PROG_TAR([v7])])]) +_AM_IF_OPTION([no-dependencies],, +[AC_PROVIDE_IFELSE([AC_PROG_CC], + [_AM_DEPENDENCIES([CC])], + [m4_define([AC_PROG_CC], + m4_defn([AC_PROG_CC])[_AM_DEPENDENCIES([CC])])])dnl +AC_PROVIDE_IFELSE([AC_PROG_CXX], + [_AM_DEPENDENCIES([CXX])], + [m4_define([AC_PROG_CXX], + m4_defn([AC_PROG_CXX])[_AM_DEPENDENCIES([CXX])])])dnl +AC_PROVIDE_IFELSE([AC_PROG_OBJC], + [_AM_DEPENDENCIES([OBJC])], + [m4_define([AC_PROG_OBJC], + m4_defn([AC_PROG_OBJC])[_AM_DEPENDENCIES([OBJC])])])dnl +AC_PROVIDE_IFELSE([AC_PROG_OBJCXX], + [_AM_DEPENDENCIES([OBJCXX])], + [m4_define([AC_PROG_OBJCXX], + m4_defn([AC_PROG_OBJCXX])[_AM_DEPENDENCIES([OBJCXX])])])dnl +]) +AC_REQUIRE([AM_SILENT_RULES])dnl +dnl The testsuite driver may need to know about EXEEXT, so add the +dnl 'am__EXEEXT' conditional if _AM_COMPILER_EXEEXT was seen. This +dnl macro is hooked onto _AC_COMPILER_EXEEXT early, see below. +AC_CONFIG_COMMANDS_PRE(dnl +[m4_provide_if([_AM_COMPILER_EXEEXT], + [AM_CONDITIONAL([am__EXEEXT], [test -n "$EXEEXT"])])])dnl +]) + +dnl Hook into '_AC_COMPILER_EXEEXT' early to learn its expansion. Do not +dnl add the conditional right here, as _AC_COMPILER_EXEEXT may be further +dnl mangled by Autoconf and run in a shell conditional statement. +m4_define([_AC_COMPILER_EXEEXT], +m4_defn([_AC_COMPILER_EXEEXT])[m4_provide([_AM_COMPILER_EXEEXT])]) + + +# When config.status generates a header, we must update the stamp-h file. +# This file resides in the same directory as the config header +# that is generated. The stamp files are numbered to have different names. + +# Autoconf calls _AC_AM_CONFIG_HEADER_HOOK (when defined) in the +# loop where config.status creates the headers, so we can generate +# our stamp files there. 
+AC_DEFUN([_AC_AM_CONFIG_HEADER_HOOK], +[# Compute $1's index in $config_headers. +_am_arg=$1 +_am_stamp_count=1 +for _am_header in $config_headers :; do + case $_am_header in + $_am_arg | $_am_arg:* ) + break ;; + * ) + _am_stamp_count=`expr $_am_stamp_count + 1` ;; + esac +done +echo "timestamp for $_am_arg" >`AS_DIRNAME(["$_am_arg"])`/stamp-h[]$_am_stamp_count]) + +# Copyright (C) 2001-2013 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# AM_PROG_INSTALL_SH +# ------------------ +# Define $install_sh. +AC_DEFUN([AM_PROG_INSTALL_SH], +[AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl +if test x"${install_sh}" != xset; then + case $am_aux_dir in + *\ * | *\ *) + install_sh="\${SHELL} '$am_aux_dir/install-sh'" ;; + *) + install_sh="\${SHELL} $am_aux_dir/install-sh" + esac +fi +AC_SUBST([install_sh])]) + +# Copyright (C) 2003-2013 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# Check whether the underlying file-system supports filenames +# with a leading dot. For instance MS-DOS doesn't. +AC_DEFUN([AM_SET_LEADING_DOT], +[rm -rf .tst 2>/dev/null +mkdir .tst 2>/dev/null +if test -d .tst; then + am__leading_dot=. +else + am__leading_dot=_ +fi +rmdir .tst 2>/dev/null +AC_SUBST([am__leading_dot])]) + +# Check to see how 'make' treats includes. -*- Autoconf -*- + +# Copyright (C) 2001-2013 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# AM_MAKE_INCLUDE() +# ----------------- +# Check to see how make treats includes. 
+AC_DEFUN([AM_MAKE_INCLUDE], +[am_make=${MAKE-make} +cat > confinc << 'END' +am__doit: + @echo this is the am__doit target +.PHONY: am__doit +END +# If we don't find an include directive, just comment out the code. +AC_MSG_CHECKING([for style of include used by $am_make]) +am__include="#" +am__quote= +_am_result=none +# First try GNU make style include. +echo "include confinc" > confmf +# Ignore all kinds of additional output from 'make'. +case `$am_make -s -f confmf 2> /dev/null` in #( +*the\ am__doit\ target*) + am__include=include + am__quote= + _am_result=GNU + ;; +esac +# Now try BSD make style include. +if test "$am__include" = "#"; then + echo '.include "confinc"' > confmf + case `$am_make -s -f confmf 2> /dev/null` in #( + *the\ am__doit\ target*) + am__include=.include + am__quote="\"" + _am_result=BSD + ;; + esac +fi +AC_SUBST([am__include]) +AC_SUBST([am__quote]) +AC_MSG_RESULT([$_am_result]) +rm -f confinc confmf +]) + +# Copyright (C) 1999-2013 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# AM_PROG_CC_C_O +# -------------- +# Like AC_PROG_CC_C_O, but changed for automake. +AC_DEFUN([AM_PROG_CC_C_O], +[AC_REQUIRE([AC_PROG_CC_C_O])dnl +AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl +AC_REQUIRE_AUX_FILE([compile])dnl +# FIXME: we rely on the cache variable name because +# there is no other way. +set dummy $CC +am_cc=`echo $[2] | sed ['s/[^a-zA-Z0-9_]/_/g;s/^[0-9]/_/']` +eval am_t=\$ac_cv_prog_cc_${am_cc}_c_o +if test "$am_t" != yes; then + # Losing compiler, so override with the script. + # FIXME: It is wrong to rewrite CC. + # But if we don't then we get into trouble of one sort or another. 
+ # A longer-term fix would be to have automake use am__CC in this case, + # and then we could set am__CC="\$(top_srcdir)/compile \$(CC)" + CC="$am_aux_dir/compile $CC" +fi +dnl Make sure AC_PROG_CC is never called again, or it will override our +dnl setting of CC. +m4_define([AC_PROG_CC], + [m4_fatal([AC_PROG_CC cannot be called after AM_PROG_CC_C_O])]) +]) + +# Fake the existence of programs that GNU maintainers use. -*- Autoconf -*- + +# Copyright (C) 1997-2013 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# AM_MISSING_PROG(NAME, PROGRAM) +# ------------------------------ +AC_DEFUN([AM_MISSING_PROG], +[AC_REQUIRE([AM_MISSING_HAS_RUN]) +$1=${$1-"${am_missing_run}$2"} +AC_SUBST($1)]) + +# AM_MISSING_HAS_RUN +# ------------------ +# Define MISSING if not defined so far and test if it is modern enough. +# If it is, set am_missing_run to use it, otherwise, to nothing. +AC_DEFUN([AM_MISSING_HAS_RUN], +[AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl +AC_REQUIRE_AUX_FILE([missing])dnl +if test x"${MISSING+set}" != xset; then + case $am_aux_dir in + *\ * | *\ *) + MISSING="\${SHELL} \"$am_aux_dir/missing\"" ;; + *) + MISSING="\${SHELL} $am_aux_dir/missing" ;; + esac +fi +# Use eval to expand $SHELL +if eval "$MISSING --is-lightweight"; then + am_missing_run="$MISSING " +else + am_missing_run= + AC_MSG_WARN(['missing' script is too old or missing]) +fi +]) + +# Helper functions for option handling. -*- Autoconf -*- + +# Copyright (C) 2001-2013 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. 
+ +# _AM_MANGLE_OPTION(NAME) +# ----------------------- +AC_DEFUN([_AM_MANGLE_OPTION], +[[_AM_OPTION_]m4_bpatsubst($1, [[^a-zA-Z0-9_]], [_])]) + +# _AM_SET_OPTION(NAME) +# -------------------- +# Set option NAME. Presently that only means defining a flag for this option. +AC_DEFUN([_AM_SET_OPTION], +[m4_define(_AM_MANGLE_OPTION([$1]), [1])]) + +# _AM_SET_OPTIONS(OPTIONS) +# ------------------------ +# OPTIONS is a space-separated list of Automake options. +AC_DEFUN([_AM_SET_OPTIONS], +[m4_foreach_w([_AM_Option], [$1], [_AM_SET_OPTION(_AM_Option)])]) + +# _AM_IF_OPTION(OPTION, IF-SET, [IF-NOT-SET]) +# ------------------------------------------- +# Execute IF-SET if OPTION is set, IF-NOT-SET otherwise. +AC_DEFUN([_AM_IF_OPTION], +[m4_ifset(_AM_MANGLE_OPTION([$1]), [$2], [$3])]) + +# Copyright (C) 1999-2013 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + + +# AM_PATH_PYTHON([MINIMUM-VERSION], [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND]) +# --------------------------------------------------------------------------- +# Adds support for distributing Python modules and packages. To +# install modules, copy them to $(pythondir), using the python_PYTHON +# automake variable. To install a package with the same name as the +# automake package, install to $(pkgpythondir), or use the +# pkgpython_PYTHON automake variable. +# +# The variables $(pyexecdir) and $(pkgpyexecdir) are provided as +# locations to install python extension modules (shared libraries). +# Another macro is required to find the appropriate flags to compile +# extension modules. +# +# If your package is configured with a different prefix to python, +# users will have to add the install directory to the PYTHONPATH +# environment variable, or create a .pth file (see the python +# documentation for details). 
+# +# If the MINIMUM-VERSION argument is passed, AM_PATH_PYTHON will +# cause an error if the version of python installed on the system +# doesn't meet the requirement. MINIMUM-VERSION should consist of +# numbers and dots only. +AC_DEFUN([AM_PATH_PYTHON], + [ + dnl Find a Python interpreter. Python versions prior to 2.0 are not + dnl supported. (2.0 was released on October 16, 2000). + m4_define_default([_AM_PYTHON_INTERPRETER_LIST], +[python python2 python3 python3.3 python3.2 python3.1 python3.0 python2.7 dnl + python2.6 python2.5 python2.4 python2.3 python2.2 python2.1 python2.0]) + + AC_ARG_VAR([PYTHON], [the Python interpreter]) + + m4_if([$1],[],[ + dnl No version check is needed. + # Find any Python interpreter. + if test -z "$PYTHON"; then + AC_PATH_PROGS([PYTHON], _AM_PYTHON_INTERPRETER_LIST, :) + fi + am_display_PYTHON=python + ], [ + dnl A version check is needed. + if test -n "$PYTHON"; then + # If the user set $PYTHON, use it and don't search something else. + AC_MSG_CHECKING([whether $PYTHON version is >= $1]) + AM_PYTHON_CHECK_VERSION([$PYTHON], [$1], + [AC_MSG_RESULT([yes])], + [AC_MSG_RESULT([no]) + AC_MSG_ERROR([Python interpreter is too old])]) + am_display_PYTHON=$PYTHON + else + # Otherwise, try each interpreter until we find one that satisfies + # VERSION. + AC_CACHE_CHECK([for a Python interpreter with version >= $1], + [am_cv_pathless_PYTHON],[ + for am_cv_pathless_PYTHON in _AM_PYTHON_INTERPRETER_LIST none; do + test "$am_cv_pathless_PYTHON" = none && break + AM_PYTHON_CHECK_VERSION([$am_cv_pathless_PYTHON], [$1], [break]) + done]) + # Set $PYTHON to the absolute path of $am_cv_pathless_PYTHON. + if test "$am_cv_pathless_PYTHON" = none; then + PYTHON=: + else + AC_PATH_PROG([PYTHON], [$am_cv_pathless_PYTHON]) + fi + am_display_PYTHON=$am_cv_pathless_PYTHON + fi + ]) + + if test "$PYTHON" = :; then + dnl Run any user-specified action, or abort. 
+ m4_default([$3], [AC_MSG_ERROR([no suitable Python interpreter found])]) + else + + dnl Query Python for its version number. Getting [:3] seems to be + dnl the best way to do this; it's what "site.py" does in the standard + dnl library. + + AC_CACHE_CHECK([for $am_display_PYTHON version], [am_cv_python_version], + [am_cv_python_version=`$PYTHON -c "import sys; sys.stdout.write(sys.version[[:3]])"`]) + AC_SUBST([PYTHON_VERSION], [$am_cv_python_version]) + + dnl Use the values of $prefix and $exec_prefix for the corresponding + dnl values of PYTHON_PREFIX and PYTHON_EXEC_PREFIX. These are made + dnl distinct variables so they can be overridden if need be. However, + dnl general consensus is that you shouldn't need this ability. + + AC_SUBST([PYTHON_PREFIX], ['${prefix}']) + AC_SUBST([PYTHON_EXEC_PREFIX], ['${exec_prefix}']) + + dnl At times (like when building shared libraries) you may want + dnl to know which OS platform Python thinks this is. + + AC_CACHE_CHECK([for $am_display_PYTHON platform], [am_cv_python_platform], + [am_cv_python_platform=`$PYTHON -c "import sys; sys.stdout.write(sys.platform)"`]) + AC_SUBST([PYTHON_PLATFORM], [$am_cv_python_platform]) + + # Just factor out some code duplication. + am_python_setup_sysconfig="\ +import sys +# Prefer sysconfig over distutils.sysconfig, for better compatibility +# with python 3.x. See automake bug#10227. +try: + import sysconfig +except ImportError: + can_use_sysconfig = 0 +else: + can_use_sysconfig = 1 +# Can't use sysconfig in CPython 2.7, since it's broken in virtualenvs: +# +try: + from platform import python_implementation + if python_implementation() == 'CPython' and sys.version[[:3]] == '2.7': + can_use_sysconfig = 0 +except ImportError: + pass" + + dnl Set up 4 directories: + + dnl pythondir -- where to install python scripts. This is the + dnl site-packages directory, not the python standard library + dnl directory like in previous automake betas. 
This behavior + dnl is more consistent with lispdir.m4 for example. + dnl Query distutils for this directory. + AC_CACHE_CHECK([for $am_display_PYTHON script directory], + [am_cv_python_pythondir], + [if test "x$prefix" = xNONE + then + am_py_prefix=$ac_default_prefix + else + am_py_prefix=$prefix + fi + am_cv_python_pythondir=`$PYTHON -c " +$am_python_setup_sysconfig +if can_use_sysconfig: + sitedir = sysconfig.get_path('purelib', vars={'base':'$am_py_prefix'}) +else: + from distutils import sysconfig + sitedir = sysconfig.get_python_lib(0, 0, prefix='$am_py_prefix') +sys.stdout.write(sitedir)"` + case $am_cv_python_pythondir in + $am_py_prefix*) + am__strip_prefix=`echo "$am_py_prefix" | sed 's|.|.|g'` + am_cv_python_pythondir=`echo "$am_cv_python_pythondir" | sed "s,^$am__strip_prefix,$PYTHON_PREFIX,"` + ;; + *) + case $am_py_prefix in + /usr|/System*) ;; + *) + am_cv_python_pythondir=$PYTHON_PREFIX/lib/python$PYTHON_VERSION/site-packages + ;; + esac + ;; + esac + ]) + AC_SUBST([pythondir], [$am_cv_python_pythondir]) + + dnl pkgpythondir -- $PACKAGE directory under pythondir. Was + dnl PYTHON_SITE_PACKAGE in previous betas, but this naming is + dnl more consistent with the rest of automake. + + AC_SUBST([pkgpythondir], [\${pythondir}/$PACKAGE]) + + dnl pyexecdir -- directory for installing python extension modules + dnl (shared libraries) + dnl Query distutils for this directory. 
+ AC_CACHE_CHECK([for $am_display_PYTHON extension module directory], + [am_cv_python_pyexecdir], + [if test "x$exec_prefix" = xNONE + then + am_py_exec_prefix=$am_py_prefix + else + am_py_exec_prefix=$exec_prefix + fi + am_cv_python_pyexecdir=`$PYTHON -c " +$am_python_setup_sysconfig +if can_use_sysconfig: + sitedir = sysconfig.get_path('platlib', vars={'platbase':'$am_py_prefix'}) +else: + from distutils import sysconfig + sitedir = sysconfig.get_python_lib(1, 0, prefix='$am_py_prefix') +sys.stdout.write(sitedir)"` + case $am_cv_python_pyexecdir in + $am_py_exec_prefix*) + am__strip_prefix=`echo "$am_py_exec_prefix" | sed 's|.|.|g'` + am_cv_python_pyexecdir=`echo "$am_cv_python_pyexecdir" | sed "s,^$am__strip_prefix,$PYTHON_EXEC_PREFIX,"` + ;; + *) + case $am_py_exec_prefix in + /usr|/System*) ;; + *) + am_cv_python_pyexecdir=$PYTHON_EXEC_PREFIX/lib/python$PYTHON_VERSION/site-packages + ;; + esac + ;; + esac + ]) + AC_SUBST([pyexecdir], [$am_cv_python_pyexecdir]) + + dnl pkgpyexecdir -- $(pyexecdir)/$(PACKAGE) + + AC_SUBST([pkgpyexecdir], [\${pyexecdir}/$PACKAGE]) + + dnl Run any user-specified action. + $2 + fi + +]) + + +# AM_PYTHON_CHECK_VERSION(PROG, VERSION, [ACTION-IF-TRUE], [ACTION-IF-FALSE]) +# --------------------------------------------------------------------------- +# Run ACTION-IF-TRUE if the Python interpreter PROG has version >= VERSION. +# Run ACTION-IF-FALSE otherwise. +# This test uses sys.hexversion instead of the string equivalent (first +# word of sys.version), in order to cope with versions such as 2.2c1. +# This supports Python 2.0 or higher. (2.0 was released on October 16, 2000). +AC_DEFUN([AM_PYTHON_CHECK_VERSION], + [prog="import sys +# split strings by '.' and convert to numeric. Append some zeros +# because we need at least 4 digits for the hex conversion. 
+# map returns an iterator in Python 3.0 and a list in 2.x +minver = list(map(int, '$2'.split('.'))) + [[0, 0, 0]] +minverhex = 0 +# xrange is not present in Python 3.0 and range returns an iterator +for i in list(range(0, 4)): minverhex = (minverhex << 8) + minver[[i]] +sys.exit(sys.hexversion < minverhex)" + AS_IF([AM_RUN_LOG([$1 -c "$prog"])], [$3], [$4])]) + +# Copyright (C) 2001-2013 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# AM_RUN_LOG(COMMAND) +# ------------------- +# Run COMMAND, save the exit status in ac_status, and log it. +# (This has been adapted from Autoconf's _AC_RUN_LOG macro.) +AC_DEFUN([AM_RUN_LOG], +[{ echo "$as_me:$LINENO: $1" >&AS_MESSAGE_LOG_FD + ($1) >&AS_MESSAGE_LOG_FD 2>&AS_MESSAGE_LOG_FD + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&AS_MESSAGE_LOG_FD + (exit $ac_status); }]) + +# Check to make sure that the build environment is sane. -*- Autoconf -*- + +# Copyright (C) 1996-2013 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# AM_SANITY_CHECK +# --------------- +AC_DEFUN([AM_SANITY_CHECK], +[AC_MSG_CHECKING([whether build environment is sane]) +# Reject unsafe characters in $srcdir or the absolute working directory +# name. Accept space and tab only in the latter. +am_lf=' +' +case `pwd` in + *[[\\\"\#\$\&\'\`$am_lf]]*) + AC_MSG_ERROR([unsafe absolute working directory name]);; +esac +case $srcdir in + *[[\\\"\#\$\&\'\`$am_lf\ \ ]]*) + AC_MSG_ERROR([unsafe srcdir value: '$srcdir']);; +esac + +# Do 'set' in a subshell so we don't clobber the current shell's +# arguments. 
Must try -L first in case configure is actually a +# symlink; some systems play weird games with the mod time of symlinks +# (eg FreeBSD returns the mod time of the symlink's containing +# directory). +if ( + am_has_slept=no + for am_try in 1 2; do + echo "timestamp, slept: $am_has_slept" > conftest.file + set X `ls -Lt "$srcdir/configure" conftest.file 2> /dev/null` + if test "$[*]" = "X"; then + # -L didn't work. + set X `ls -t "$srcdir/configure" conftest.file` + fi + if test "$[*]" != "X $srcdir/configure conftest.file" \ + && test "$[*]" != "X conftest.file $srcdir/configure"; then + + # If neither matched, then we have a broken ls. This can happen + # if, for instance, CONFIG_SHELL is bash and it inherits a + # broken ls alias from the environment. This has actually + # happened. Such a system could not be considered "sane". + AC_MSG_ERROR([ls -t appears to fail. Make sure there is not a broken + alias in your environment]) + fi + if test "$[2]" = conftest.file || test $am_try -eq 2; then + break + fi + # Just in case. + sleep 1 + am_has_slept=yes + done + test "$[2]" = conftest.file + ) +then + # Ok. + : +else + AC_MSG_ERROR([newly created file is older than distributed files! +Check your system clock]) +fi +AC_MSG_RESULT([yes]) +# If we didn't sleep, we still need to ensure time stamps of config.status and +# generated files are strictly newer. +am_sleep_pid= +if grep 'slept: no' conftest.file >/dev/null 2>&1; then + ( sleep 1 ) & + am_sleep_pid=$! +fi +AC_CONFIG_COMMANDS_PRE( + [AC_MSG_CHECKING([that generated files are newer than configure]) + if test -n "$am_sleep_pid"; then + # Hide warnings about reused PIDs. + wait $am_sleep_pid 2>/dev/null + fi + AC_MSG_RESULT([done])]) +rm -f conftest.file +]) + +# Copyright (C) 2009-2013 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. 
+ +# AM_SILENT_RULES([DEFAULT]) +# -------------------------- +# Enable less verbose build rules; with the default set to DEFAULT +# ("yes" being less verbose, "no" or empty being verbose). +AC_DEFUN([AM_SILENT_RULES], +[AC_ARG_ENABLE([silent-rules], [dnl +AS_HELP_STRING( + [--enable-silent-rules], + [less verbose build output (undo: "make V=1")]) +AS_HELP_STRING( + [--disable-silent-rules], + [verbose build output (undo: "make V=0")])dnl +]) +case $enable_silent_rules in @%:@ ((( + yes) AM_DEFAULT_VERBOSITY=0;; + no) AM_DEFAULT_VERBOSITY=1;; + *) AM_DEFAULT_VERBOSITY=m4_if([$1], [yes], [0], [1]);; +esac +dnl +dnl A few 'make' implementations (e.g., NonStop OS and NextStep) +dnl do not support nested variable expansions. +dnl See automake bug#9928 and bug#10237. +am_make=${MAKE-make} +AC_CACHE_CHECK([whether $am_make supports nested variables], + [am_cv_make_support_nested_variables], + [if AS_ECHO([['TRUE=$(BAR$(V)) +BAR0=false +BAR1=true +V=1 +am__doit: + @$(TRUE) +.PHONY: am__doit']]) | $am_make -f - >/dev/null 2>&1; then + am_cv_make_support_nested_variables=yes +else + am_cv_make_support_nested_variables=no +fi]) +if test $am_cv_make_support_nested_variables = yes; then + dnl Using '$V' instead of '$(V)' breaks IRIX make. + AM_V='$(V)' + AM_DEFAULT_V='$(AM_DEFAULT_VERBOSITY)' +else + AM_V=$AM_DEFAULT_VERBOSITY + AM_DEFAULT_V=$AM_DEFAULT_VERBOSITY +fi +AC_SUBST([AM_V])dnl +AM_SUBST_NOTMAKE([AM_V])dnl +AC_SUBST([AM_DEFAULT_V])dnl +AM_SUBST_NOTMAKE([AM_DEFAULT_V])dnl +AC_SUBST([AM_DEFAULT_VERBOSITY])dnl +AM_BACKSLASH='\' +AC_SUBST([AM_BACKSLASH])dnl +_AM_SUBST_NOTMAKE([AM_BACKSLASH])dnl +]) + +# Copyright (C) 2001-2013 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. 
+ +# AM_PROG_INSTALL_STRIP +# --------------------- +# One issue with vendor 'install' (even GNU) is that you can't +# specify the program used to strip binaries. This is especially +# annoying in cross-compiling environments, where the build's strip +# is unlikely to handle the host's binaries. +# Fortunately install-sh will honor a STRIPPROG variable, so we +# always use install-sh in "make install-strip", and initialize +# STRIPPROG with the value of the STRIP variable (set by the user). +AC_DEFUN([AM_PROG_INSTALL_STRIP], +[AC_REQUIRE([AM_PROG_INSTALL_SH])dnl +# Installed binaries are usually stripped using 'strip' when the user +# run "make install-strip". However 'strip' might not be the right +# tool to use in cross-compilation environments, therefore Automake +# will honor the 'STRIP' environment variable to overrule this program. +dnl Don't test for $cross_compiling = yes, because it might be 'maybe'. +if test "$cross_compiling" != no; then + AC_CHECK_TOOL([STRIP], [strip], :) +fi +INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s" +AC_SUBST([INSTALL_STRIP_PROGRAM])]) + +# Copyright (C) 2006-2013 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# _AM_SUBST_NOTMAKE(VARIABLE) +# --------------------------- +# Prevent Automake from outputting VARIABLE = @VARIABLE@ in Makefile.in. +# This macro is traced by Automake. +AC_DEFUN([_AM_SUBST_NOTMAKE]) + +# AM_SUBST_NOTMAKE(VARIABLE) +# -------------------------- +# Public sister of _AM_SUBST_NOTMAKE. +AC_DEFUN([AM_SUBST_NOTMAKE], [_AM_SUBST_NOTMAKE($@)]) + +# Check how to create a tarball. -*- Autoconf -*- + +# Copyright (C) 2004-2013 Free Software Foundation, Inc. 
+# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# _AM_PROG_TAR(FORMAT) +# -------------------- +# Check how to create a tarball in format FORMAT. +# FORMAT should be one of 'v7', 'ustar', or 'pax'. +# +# Substitute a variable $(am__tar) that is a command +# writing to stdout a FORMAT-tarball containing the directory +# $tardir. +# tardir=directory && $(am__tar) > result.tar +# +# Substitute a variable $(am__untar) that extract such +# a tarball read from stdin. +# $(am__untar) < result.tar +# +AC_DEFUN([_AM_PROG_TAR], +[# Always define AMTAR for backward compatibility. Yes, it's still used +# in the wild :-( We should find a proper way to deprecate it ... +AC_SUBST([AMTAR], ['$${TAR-tar}']) + +# We'll loop over all known methods to create a tar archive until one works. +_am_tools='gnutar m4_if([$1], [ustar], [plaintar]) pax cpio none' + +m4_if([$1], [v7], + [am__tar='$${TAR-tar} chof - "$$tardir"' am__untar='$${TAR-tar} xf -'], + + [m4_case([$1], + [ustar], + [# The POSIX 1988 'ustar' format is defined with fixed-size fields. + # There is notably a 21 bits limit for the UID and the GID. In fact, + # the 'pax' utility can hang on bigger UID/GID (see automake bug#8343 + # and bug#13588). + am_max_uid=2097151 # 2^21 - 1 + am_max_gid=$am_max_uid + # The $UID and $GID variables are not portable, so we need to resort + # to the POSIX-mandated id(1) utility. Errors in the 'id' calls + # below are definitely unexpected, so allow the users to see them + # (that is, avoid stderr redirection). 
+ am_uid=`id -u || echo unknown` + am_gid=`id -g || echo unknown` + AC_MSG_CHECKING([whether UID '$am_uid' is supported by ustar format]) + if test $am_uid -le $am_max_uid; then + AC_MSG_RESULT([yes]) + else + AC_MSG_RESULT([no]) + _am_tools=none + fi + AC_MSG_CHECKING([whether GID '$am_gid' is supported by ustar format]) + if test $am_gid -le $am_max_gid; then + AC_MSG_RESULT([yes]) + else + AC_MSG_RESULT([no]) + _am_tools=none + fi], + + [pax], + [], + + [m4_fatal([Unknown tar format])]) + + AC_MSG_CHECKING([how to create a $1 tar archive]) + + # Go ahead even if we have the value already cached. We do so because we + # need to set the values for the 'am__tar' and 'am__untar' variables. + _am_tools=${am_cv_prog_tar_$1-$_am_tools} + + for _am_tool in $_am_tools; do + case $_am_tool in + gnutar) + for _am_tar in tar gnutar gtar; do + AM_RUN_LOG([$_am_tar --version]) && break + done + am__tar="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$$tardir"' + am__tar_="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$tardir"' + am__untar="$_am_tar -xf -" + ;; + plaintar) + # Must skip GNU tar: if it does not support --format= it doesn't create + # ustar tarball either. + (tar --version) >/dev/null 2>&1 && continue + am__tar='tar chf - "$$tardir"' + am__tar_='tar chf - "$tardir"' + am__untar='tar xf -' + ;; + pax) + am__tar='pax -L -x $1 -w "$$tardir"' + am__tar_='pax -L -x $1 -w "$tardir"' + am__untar='pax -r' + ;; + cpio) + am__tar='find "$$tardir" -print | cpio -o -H $1 -L' + am__tar_='find "$tardir" -print | cpio -o -H $1 -L' + am__untar='cpio -i -H $1 -d' + ;; + none) + am__tar=false + am__tar_=false + am__untar=false + ;; + esac + + # If the value was cached, stop now. We just wanted to have am__tar + # and am__untar set. + test -n "${am_cv_prog_tar_$1}" && break + + # tar/untar a dummy directory, and stop if the command works. 
+ rm -rf conftest.dir + mkdir conftest.dir + echo GrepMe > conftest.dir/file + AM_RUN_LOG([tardir=conftest.dir && eval $am__tar_ >conftest.tar]) + rm -rf conftest.dir + if test -s conftest.tar; then + AM_RUN_LOG([$am__untar /dev/null 2>&1 && break + fi + done + rm -rf conftest.dir + + AC_CACHE_VAL([am_cv_prog_tar_$1], [am_cv_prog_tar_$1=$_am_tool]) + AC_MSG_RESULT([$am_cv_prog_tar_$1])]) + +AC_SUBST([am__tar]) +AC_SUBST([am__untar]) +]) # _AM_PROG_TAR + +m4_include([build/ax_boost_base.m4]) +m4_include([build/ax_check_openssl.m4]) +m4_include([build/ax_compare_version.m4]) +m4_include([build/ax_compiler_vendor.m4]) +m4_include([build/ax_cxx_compile_stdcxx_11.m4]) +m4_include([build/ax_lib_curl.m4]) +m4_include([build/ax_lib_readline.m4]) +m4_include([build/ax_path_generic.m4]) +m4_include([build/ax_perl_modules.m4]) +m4_include([build/ax_with_curses.m4]) +m4_include([build/common.m4]) +m4_include([build/crypto.m4]) +m4_include([build/jemalloc.m4]) +m4_include([build/libtool.m4]) +m4_include([build/ltoptions.m4]) +m4_include([build/ltsugar.m4]) +m4_include([build/ltversion.m4]) +m4_include([build/lt~obsolete.m4]) +m4_include([build/lzma.m4]) +m4_include([build/network.m4]) +m4_include([build/pcre.m4]) +m4_include([build/pkg.m4]) +m4_include([build/tcl.m4]) +m4_include([build/tcmalloc.m4]) +m4_include([build/xml.m4]) +m4_include([build/zlib.m4]) diff --git a/build/aux/ar-lib b/build/aux/ar-lib new file mode 100755 index 00000000..fe2301e7 --- /dev/null +++ b/build/aux/ar-lib @@ -0,0 +1,270 @@ +#! /bin/sh +# Wrapper for Microsoft lib.exe + +me=ar-lib +scriptversion=2012-03-01.08; # UTC + +# Copyright (C) 2010-2013 Free Software Foundation, Inc. +# Written by Peter Rosin . +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that program. + +# This file is maintained in Automake, please report +# bugs to or send patches to +# . + + +# func_error message +func_error () +{ + echo "$me: $1" 1>&2 + exit 1 +} + +file_conv= + +# func_file_conv build_file +# Convert a $build file to $host form and store it in $file +# Currently only supports Windows hosts. +func_file_conv () +{ + file=$1 + case $file in + / | /[!/]*) # absolute file, and not a UNC file + if test -z "$file_conv"; then + # lazily determine how to convert abs files + case `uname -s` in + MINGW*) + file_conv=mingw + ;; + CYGWIN*) + file_conv=cygwin + ;; + *) + file_conv=wine + ;; + esac + fi + case $file_conv in + mingw) + file=`cmd //C echo "$file " | sed -e 's/"\(.*\) " *$/\1/'` + ;; + cygwin) + file=`cygpath -m "$file" || echo "$file"` + ;; + wine) + file=`winepath -w "$file" || echo "$file"` + ;; + esac + ;; + esac +} + +# func_at_file at_file operation archive +# Iterate over all members in AT_FILE performing OPERATION on ARCHIVE +# for each of them. +# When interpreting the content of the @FILE, do NOT use func_file_conv, +# since the user would need to supply preconverted file names to +# binutils ar, at least for MinGW. 
+func_at_file () +{ + operation=$2 + archive=$3 + at_file_contents=`cat "$1"` + eval set x "$at_file_contents" + shift + + for member + do + $AR -NOLOGO $operation:"$member" "$archive" || exit $? + done +} + +case $1 in + '') + func_error "no command. Try '$0 --help' for more information." + ;; + -h | --h*) + cat <. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that program. + +# This file is maintained in Automake, please report +# bugs to or send patches to +# . + +nl=' +' + +# We need space, tab and new line, in precisely that order. Quoting is +# there to prevent tools from complaining about whitespace usage. +IFS=" "" $nl" + +file_conv= + +# func_file_conv build_file lazy +# Convert a $build file to $host form and store it in $file +# Currently only supports Windows hosts. If the determined conversion +# type is listed in (the comma separated) LAZY, no conversion will +# take place. 
+func_file_conv () +{ + file=$1 + case $file in + / | /[!/]*) # absolute file, and not a UNC file + if test -z "$file_conv"; then + # lazily determine how to convert abs files + case `uname -s` in + MINGW*) + file_conv=mingw + ;; + CYGWIN*) + file_conv=cygwin + ;; + *) + file_conv=wine + ;; + esac + fi + case $file_conv/,$2, in + *,$file_conv,*) + ;; + mingw/*) + file=`cmd //C echo "$file " | sed -e 's/"\(.*\) " *$/\1/'` + ;; + cygwin/*) + file=`cygpath -m "$file" || echo "$file"` + ;; + wine/*) + file=`winepath -w "$file" || echo "$file"` + ;; + esac + ;; + esac +} + +# func_cl_dashL linkdir +# Make cl look for libraries in LINKDIR +func_cl_dashL () +{ + func_file_conv "$1" + if test -z "$lib_path"; then + lib_path=$file + else + lib_path="$lib_path;$file" + fi + linker_opts="$linker_opts -LIBPATH:$file" +} + +# func_cl_dashl library +# Do a library search-path lookup for cl +func_cl_dashl () +{ + lib=$1 + found=no + save_IFS=$IFS + IFS=';' + for dir in $lib_path $LIB + do + IFS=$save_IFS + if $shared && test -f "$dir/$lib.dll.lib"; then + found=yes + lib=$dir/$lib.dll.lib + break + fi + if test -f "$dir/$lib.lib"; then + found=yes + lib=$dir/$lib.lib + break + fi + if test -f "$dir/lib$lib.a"; then + found=yes + lib=$dir/lib$lib.a + break + fi + done + IFS=$save_IFS + + if test "$found" != yes; then + lib=$lib.lib + fi +} + +# func_cl_wrapper cl arg... +# Adjust compile command to suit cl +func_cl_wrapper () +{ + # Assume a capable shell + lib_path= + shared=: + linker_opts= + for arg + do + if test -n "$eat"; then + eat= + else + case $1 in + -o) + # configure might choose to run compile as 'compile cc -o foo foo.c'. 
+ eat=1 + case $2 in + *.o | *.[oO][bB][jJ]) + func_file_conv "$2" + set x "$@" -Fo"$file" + shift + ;; + *) + func_file_conv "$2" + set x "$@" -Fe"$file" + shift + ;; + esac + ;; + -I) + eat=1 + func_file_conv "$2" mingw + set x "$@" -I"$file" + shift + ;; + -I*) + func_file_conv "${1#-I}" mingw + set x "$@" -I"$file" + shift + ;; + -l) + eat=1 + func_cl_dashl "$2" + set x "$@" "$lib" + shift + ;; + -l*) + func_cl_dashl "${1#-l}" + set x "$@" "$lib" + shift + ;; + -L) + eat=1 + func_cl_dashL "$2" + ;; + -L*) + func_cl_dashL "${1#-L}" + ;; + -static) + shared=false + ;; + -Wl,*) + arg=${1#-Wl,} + save_ifs="$IFS"; IFS=',' + for flag in $arg; do + IFS="$save_ifs" + linker_opts="$linker_opts $flag" + done + IFS="$save_ifs" + ;; + -Xlinker) + eat=1 + linker_opts="$linker_opts $2" + ;; + -*) + set x "$@" "$1" + shift + ;; + *.cc | *.CC | *.cxx | *.CXX | *.[cC]++) + func_file_conv "$1" + set x "$@" -Tp"$file" + shift + ;; + *.c | *.cpp | *.CPP | *.lib | *.LIB | *.Lib | *.OBJ | *.obj | *.[oO]) + func_file_conv "$1" mingw + set x "$@" "$file" + shift + ;; + *) + set x "$@" "$1" + shift + ;; + esac + fi + shift + done + if test -n "$linker_opts"; then + linker_opts="-link$linker_opts" + fi + exec "$@" $linker_opts + exit 1 +} + +eat= + +case $1 in + '') + echo "$0: No command. Try '$0 --help' for more information." 1>&2 + exit 1; + ;; + -h | --h*) + cat <<\EOF +Usage: compile [--help] [--version] PROGRAM [ARGS] + +Wrapper for compilers which do not understand '-c -o'. +Remove '-o dest.o' from ARGS, run PROGRAM with the remaining +arguments, and rename the output as expected. + +If you are trying to build a whole package this is not the +right script to run: please start by reading the file 'INSTALL'. + +Report bugs to . +EOF + exit $? + ;; + -v | --v*) + echo "compile $scriptversion" + exit $? + ;; + cl | *[/\\]cl | cl.exe | *[/\\]cl.exe ) + func_cl_wrapper "$@" # Doesn't return... 
+ ;; +esac + +ofile= +cfile= + +for arg +do + if test -n "$eat"; then + eat= + else + case $1 in + -o) + # configure might choose to run compile as 'compile cc -o foo foo.c'. + # So we strip '-o arg' only if arg is an object. + eat=1 + case $2 in + *.o | *.obj) + ofile=$2 + ;; + *) + set x "$@" -o "$2" + shift + ;; + esac + ;; + *.c) + cfile=$1 + set x "$@" "$1" + shift + ;; + *) + set x "$@" "$1" + shift + ;; + esac + fi + shift +done + +if test -z "$ofile" || test -z "$cfile"; then + # If no '-o' option was seen then we might have been invoked from a + # pattern rule where we don't need one. That is ok -- this is a + # normal compilation that the losing compiler can handle. If no + # '.c' file was seen then we are probably linking. That is also + # ok. + exec "$@" +fi + +# Name of file we expect compiler to create. +cofile=`echo "$cfile" | sed 's|^.*[\\/]||; s|^[a-zA-Z]:||; s/\.c$/.o/'` + +# Create the lock directory. +# Note: use '[/\\:.-]' here to ensure that we don't use the same name +# that we are using for the .o file. Also, base the name on the expected +# object file name, since that is what matters with a parallel build. +lockdir=`echo "$cofile" | sed -e 's|[/\\:.-]|_|g'`.d +while true; do + if mkdir "$lockdir" >/dev/null 2>&1; then + break + fi + sleep 1 +done +# FIXME: race condition here if user kills between mkdir and trap. +trap "rmdir '$lockdir'; exit 1" 1 2 15 + +# Run the compile. +"$@" +ret=$? 
+ +if test -f "$cofile"; then + test "$cofile" = "$ofile" || mv "$cofile" "$ofile" +elif test -f "${cofile}bj"; then + test "${cofile}bj" = "$ofile" || mv "${cofile}bj" "$ofile" +fi + +rmdir "$lockdir" +exit $ret + +# Local Variables: +# mode: shell-script +# sh-indentation: 2 +# eval: (add-hook 'write-file-hooks 'time-stamp) +# time-stamp-start: "scriptversion=" +# time-stamp-format: "%:y-%02m-%02d.%02H" +# time-stamp-time-zone: "UTC" +# time-stamp-end: "; # UTC" +# End: diff --git a/build/aux/config.guess b/build/aux/config.guess new file mode 100755 index 00000000..1804e9fc --- /dev/null +++ b/build/aux/config.guess @@ -0,0 +1,1535 @@ +#! /bin/sh +# Attempt to guess a canonical system name. +# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, +# 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, +# 2011, 2012, 2013 Free Software Foundation, Inc. + +timestamp='2012-12-29' + +# This file is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, see . +# +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that +# program. This Exception is an additional permission under section 7 +# of the GNU General Public License, version 3 ("GPLv3"). +# +# Originally written by Per Bothner. 
+# +# You can get the latest version of this script from: +# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD +# +# Please send patches with a ChangeLog entry to config-patches@gnu.org. + + +me=`echo "$0" | sed -e 's,.*/,,'` + +usage="\ +Usage: $0 [OPTION] + +Output the configuration name of the system \`$me' is run on. + +Operation modes: + -h, --help print this help, then exit + -t, --time-stamp print date of last modification, then exit + -v, --version print version number, then exit + +Report bugs and patches to ." + +version="\ +GNU config.guess ($timestamp) + +Originally written by Per Bothner. +Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, +2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, +2012, 2013 Free Software Foundation, Inc. + +This is free software; see the source for copying conditions. There is NO +warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." + +help=" +Try \`$me --help' for more information." + +# Parse command line +while test $# -gt 0 ; do + case $1 in + --time-stamp | --time* | -t ) + echo "$timestamp" ; exit ;; + --version | -v ) + echo "$version" ; exit ;; + --help | --h* | -h ) + echo "$usage"; exit ;; + -- ) # Stop option processing + shift; break ;; + - ) # Use stdin as input. + break ;; + -* ) + echo "$me: invalid option $1$help" >&2 + exit 1 ;; + * ) + break ;; + esac +done + +if test $# != 0; then + echo "$me: too many arguments$help" >&2 + exit 1 +fi + +trap 'exit 1' 1 2 15 + +# CC_FOR_BUILD -- compiler used by this script. Note that the use of a +# compiler to aid in system detection is discouraged as it requires +# temporary files to be created and, as you can see below, it is a +# headache to deal with in a portable fashion. + +# Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still +# use `HOST_CC' if defined, but it is deprecated. + +# Portable tmp directory creation inspired by the Autoconf team. 
+ +set_cc_for_build=' +trap "exitcode=\$?; (rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null) && exit \$exitcode" 0 ; +trap "rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null; exit 1" 1 2 13 15 ; +: ${TMPDIR=/tmp} ; + { tmp=`(umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } || + { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir $tmp) ; } || + { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir $tmp) && echo "Warning: creating insecure temp directory" >&2 ; } || + { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } ; +dummy=$tmp/dummy ; +tmpfiles="$dummy.c $dummy.o $dummy.rel $dummy" ; +case $CC_FOR_BUILD,$HOST_CC,$CC in + ,,) echo "int x;" > $dummy.c ; + for c in cc gcc c89 c99 ; do + if ($c -c -o $dummy.o $dummy.c) >/dev/null 2>&1 ; then + CC_FOR_BUILD="$c"; break ; + fi ; + done ; + if test x"$CC_FOR_BUILD" = x ; then + CC_FOR_BUILD=no_compiler_found ; + fi + ;; + ,,*) CC_FOR_BUILD=$CC ;; + ,*,*) CC_FOR_BUILD=$HOST_CC ;; +esac ; set_cc_for_build= ;' + +# This is needed to find uname on a Pyramid OSx when run in the BSD universe. +# (ghazi@noc.rutgers.edu 1994-08-24) +if (test -f /.attbin/uname) >/dev/null 2>&1 ; then + PATH=$PATH:/.attbin ; export PATH +fi + +UNAME_MACHINE=`(uname -m) 2>/dev/null` || UNAME_MACHINE=unknown +UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown +UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown +UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown + +# Note: order is significant - the case branches are not exclusive. + +case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in + *:NetBSD:*:*) + # NetBSD (nbsd) targets should (where applicable) match one or + # more of the tuples: *-*-netbsdelf*, *-*-netbsdaout*, + # *-*-netbsdecoff* and *-*-netbsd*. For targets that recently + # switched to ELF, *-*-netbsd* would select the old + # object file format. 
This provides both forward + # compatibility and a consistent mechanism for selecting the + # object file format. + # + # Note: NetBSD doesn't particularly care about the vendor + # portion of the name. We always set it to "unknown". + sysctl="sysctl -n hw.machine_arch" + UNAME_MACHINE_ARCH=`(/sbin/$sysctl 2>/dev/null || \ + /usr/sbin/$sysctl 2>/dev/null || echo unknown)` + case "${UNAME_MACHINE_ARCH}" in + armeb) machine=armeb-unknown ;; + arm*) machine=arm-unknown ;; + sh3el) machine=shl-unknown ;; + sh3eb) machine=sh-unknown ;; + sh5el) machine=sh5le-unknown ;; + *) machine=${UNAME_MACHINE_ARCH}-unknown ;; + esac + # The Operating System including object format, if it has switched + # to ELF recently, or will in the future. + case "${UNAME_MACHINE_ARCH}" in + arm*|i386|m68k|ns32k|sh3*|sparc|vax) + eval $set_cc_for_build + if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \ + | grep -q __ELF__ + then + # Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout). + # Return netbsd for either. FIX? + os=netbsd + else + os=netbsdelf + fi + ;; + *) + os=netbsd + ;; + esac + # The OS release + # Debian GNU/NetBSD machines have a different userland, and + # thus, need a distinct triplet. However, they do not need + # kernel version information, so it can be replaced with a + # suitable tag, in the style of linux-gnu. + case "${UNAME_VERSION}" in + Debian*) + release='-gnu' + ;; + *) + release=`echo ${UNAME_RELEASE}|sed -e 's/[-_].*/\./'` + ;; + esac + # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM: + # contains redundant information, the shorter form: + # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used. 
+ echo "${machine}-${os}${release}" + exit ;; + *:Bitrig:*:*) + UNAME_MACHINE_ARCH=`arch | sed 's/Bitrig.//'` + echo ${UNAME_MACHINE_ARCH}-unknown-bitrig${UNAME_RELEASE} + exit ;; + *:OpenBSD:*:*) + UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'` + echo ${UNAME_MACHINE_ARCH}-unknown-openbsd${UNAME_RELEASE} + exit ;; + *:ekkoBSD:*:*) + echo ${UNAME_MACHINE}-unknown-ekkobsd${UNAME_RELEASE} + exit ;; + *:SolidBSD:*:*) + echo ${UNAME_MACHINE}-unknown-solidbsd${UNAME_RELEASE} + exit ;; + macppc:MirBSD:*:*) + echo powerpc-unknown-mirbsd${UNAME_RELEASE} + exit ;; + *:MirBSD:*:*) + echo ${UNAME_MACHINE}-unknown-mirbsd${UNAME_RELEASE} + exit ;; + alpha:OSF1:*:*) + case $UNAME_RELEASE in + *4.0) + UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'` + ;; + *5.*) + UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'` + ;; + esac + # According to Compaq, /usr/sbin/psrinfo has been available on + # OSF/1 and Tru64 systems produced since 1995. I hope that + # covers most systems running today. This code pipes the CPU + # types through head -n 1, so we only detect the type of CPU 0. 
+ ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1` + case "$ALPHA_CPU_TYPE" in + "EV4 (21064)") + UNAME_MACHINE="alpha" ;; + "EV4.5 (21064)") + UNAME_MACHINE="alpha" ;; + "LCA4 (21066/21068)") + UNAME_MACHINE="alpha" ;; + "EV5 (21164)") + UNAME_MACHINE="alphaev5" ;; + "EV5.6 (21164A)") + UNAME_MACHINE="alphaev56" ;; + "EV5.6 (21164PC)") + UNAME_MACHINE="alphapca56" ;; + "EV5.7 (21164PC)") + UNAME_MACHINE="alphapca57" ;; + "EV6 (21264)") + UNAME_MACHINE="alphaev6" ;; + "EV6.7 (21264A)") + UNAME_MACHINE="alphaev67" ;; + "EV6.8CB (21264C)") + UNAME_MACHINE="alphaev68" ;; + "EV6.8AL (21264B)") + UNAME_MACHINE="alphaev68" ;; + "EV6.8CX (21264D)") + UNAME_MACHINE="alphaev68" ;; + "EV6.9A (21264/EV69A)") + UNAME_MACHINE="alphaev69" ;; + "EV7 (21364)") + UNAME_MACHINE="alphaev7" ;; + "EV7.9 (21364A)") + UNAME_MACHINE="alphaev79" ;; + esac + # A Pn.n version is a patched version. + # A Vn.n version is a released version. + # A Tn.n version is a released field test version. + # A Xn.n version is an unreleased experimental baselevel. + # 1.2 uses "1.2" for uname -r. + echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'` + # Reset EXIT trap before exiting to avoid spurious non-zero exit code. + exitcode=$? + trap '' 0 + exit $exitcode ;; + Alpha\ *:Windows_NT*:*) + # How do we know it's Interix rather than the generic POSIX subsystem? + # Should we change UNAME_MACHINE based on the output of uname instead + # of the specific Alpha model? 
+ echo alpha-pc-interix + exit ;; + 21064:Windows_NT:50:3) + echo alpha-dec-winnt3.5 + exit ;; + Amiga*:UNIX_System_V:4.0:*) + echo m68k-unknown-sysv4 + exit ;; + *:[Aa]miga[Oo][Ss]:*:*) + echo ${UNAME_MACHINE}-unknown-amigaos + exit ;; + *:[Mm]orph[Oo][Ss]:*:*) + echo ${UNAME_MACHINE}-unknown-morphos + exit ;; + *:OS/390:*:*) + echo i370-ibm-openedition + exit ;; + *:z/VM:*:*) + echo s390-ibm-zvmoe + exit ;; + *:OS400:*:*) + echo powerpc-ibm-os400 + exit ;; + arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*) + echo arm-acorn-riscix${UNAME_RELEASE} + exit ;; + arm*:riscos:*:*|arm*:RISCOS:*:*) + echo arm-unknown-riscos + exit ;; + SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*) + echo hppa1.1-hitachi-hiuxmpp + exit ;; + Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*) + # akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE. + if test "`(/bin/universe) 2>/dev/null`" = att ; then + echo pyramid-pyramid-sysv3 + else + echo pyramid-pyramid-bsd + fi + exit ;; + NILE*:*:*:dcosx) + echo pyramid-pyramid-svr4 + exit ;; + DRS?6000:unix:4.0:6*) + echo sparc-icl-nx6 + exit ;; + DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*) + case `/usr/bin/uname -p` in + sparc) echo sparc-icl-nx7; exit ;; + esac ;; + s390x:SunOS:*:*) + echo ${UNAME_MACHINE}-ibm-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + sun4H:SunOS:5.*:*) + echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*) + echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + i86pc:AuroraUX:5.*:* | i86xen:AuroraUX:5.*:*) + echo i386-pc-auroraux${UNAME_RELEASE} + exit ;; + i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*) + eval $set_cc_for_build + SUN_ARCH="i386" + # If there is a compiler, see if it is configured for 64-bit objects. + # Note that the Sun cc does not turn __LP64__ into 1 like gcc does. + # This test works for both compilers. 
+ if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then + if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \ + (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \ + grep IS_64BIT_ARCH >/dev/null + then + SUN_ARCH="x86_64" + fi + fi + echo ${SUN_ARCH}-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + sun4*:SunOS:6*:*) + # According to config.sub, this is the proper way to canonicalize + # SunOS6. Hard to guess exactly what SunOS6 will be like, but + # it's likely to be more like Solaris than SunOS4. + echo sparc-sun-solaris3`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + sun4*:SunOS:*:*) + case "`/usr/bin/arch -k`" in + Series*|S4*) + UNAME_RELEASE=`uname -v` + ;; + esac + # Japanese Language versions have a version number like `4.1.3-JL'. + echo sparc-sun-sunos`echo ${UNAME_RELEASE}|sed -e 's/-/_/'` + exit ;; + sun3*:SunOS:*:*) + echo m68k-sun-sunos${UNAME_RELEASE} + exit ;; + sun*:*:4.2BSD:*) + UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null` + test "x${UNAME_RELEASE}" = "x" && UNAME_RELEASE=3 + case "`/bin/arch`" in + sun3) + echo m68k-sun-sunos${UNAME_RELEASE} + ;; + sun4) + echo sparc-sun-sunos${UNAME_RELEASE} + ;; + esac + exit ;; + aushp:SunOS:*:*) + echo sparc-auspex-sunos${UNAME_RELEASE} + exit ;; + # The situation for MiNT is a little confusing. The machine name + # can be virtually everything (everything which is not + # "atarist" or "atariste" at least should have a processor + # > m68000). The system name ranges from "MiNT" over "FreeMiNT" + # to the lowercase version "mint" (or "freemint"). Finally + # the system name "TOS" denotes a system which is actually not + # MiNT. But MiNT is downward compatible to TOS, so this should + # be no problem. 
+ atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*) + echo m68k-atari-mint${UNAME_RELEASE} + exit ;; + atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*) + echo m68k-atari-mint${UNAME_RELEASE} + exit ;; + *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*) + echo m68k-atari-mint${UNAME_RELEASE} + exit ;; + milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*) + echo m68k-milan-mint${UNAME_RELEASE} + exit ;; + hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*) + echo m68k-hades-mint${UNAME_RELEASE} + exit ;; + *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*) + echo m68k-unknown-mint${UNAME_RELEASE} + exit ;; + m68k:machten:*:*) + echo m68k-apple-machten${UNAME_RELEASE} + exit ;; + powerpc:machten:*:*) + echo powerpc-apple-machten${UNAME_RELEASE} + exit ;; + RISC*:Mach:*:*) + echo mips-dec-mach_bsd4.3 + exit ;; + RISC*:ULTRIX:*:*) + echo mips-dec-ultrix${UNAME_RELEASE} + exit ;; + VAX*:ULTRIX*:*:*) + echo vax-dec-ultrix${UNAME_RELEASE} + exit ;; + 2020:CLIX:*:* | 2430:CLIX:*:*) + echo clipper-intergraph-clix${UNAME_RELEASE} + exit ;; + mips:*:*:UMIPS | mips:*:*:RISCos) + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c +#ifdef __cplusplus +#include /* for printf() prototype */ + int main (int argc, char *argv[]) { +#else + int main (argc, argv) int argc; char *argv[]; { +#endif + #if defined (host_mips) && defined (MIPSEB) + #if defined (SYSTYPE_SYSV) + printf ("mips-mips-riscos%ssysv\n", argv[1]); exit (0); + #endif + #if defined (SYSTYPE_SVR4) + printf ("mips-mips-riscos%ssvr4\n", argv[1]); exit (0); + #endif + #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD) + printf ("mips-mips-riscos%sbsd\n", argv[1]); exit (0); + #endif + #endif + exit (-1); + } +EOF + $CC_FOR_BUILD -o $dummy $dummy.c && + dummyarg=`echo "${UNAME_RELEASE}" | sed -n 's/\([0-9]*\).*/\1/p'` && + SYSTEM_NAME=`$dummy $dummyarg` && + { echo "$SYSTEM_NAME"; exit; } + echo mips-mips-riscos${UNAME_RELEASE} + exit ;; + Motorola:PowerMAX_OS:*:*) + echo 
powerpc-motorola-powermax + exit ;; + Motorola:*:4.3:PL8-*) + echo powerpc-harris-powermax + exit ;; + Night_Hawk:*:*:PowerMAX_OS | Synergy:PowerMAX_OS:*:*) + echo powerpc-harris-powermax + exit ;; + Night_Hawk:Power_UNIX:*:*) + echo powerpc-harris-powerunix + exit ;; + m88k:CX/UX:7*:*) + echo m88k-harris-cxux7 + exit ;; + m88k:*:4*:R4*) + echo m88k-motorola-sysv4 + exit ;; + m88k:*:3*:R3*) + echo m88k-motorola-sysv3 + exit ;; + AViiON:dgux:*:*) + # DG/UX returns AViiON for all architectures + UNAME_PROCESSOR=`/usr/bin/uname -p` + if [ $UNAME_PROCESSOR = mc88100 ] || [ $UNAME_PROCESSOR = mc88110 ] + then + if [ ${TARGET_BINARY_INTERFACE}x = m88kdguxelfx ] || \ + [ ${TARGET_BINARY_INTERFACE}x = x ] + then + echo m88k-dg-dgux${UNAME_RELEASE} + else + echo m88k-dg-dguxbcs${UNAME_RELEASE} + fi + else + echo i586-dg-dgux${UNAME_RELEASE} + fi + exit ;; + M88*:DolphinOS:*:*) # DolphinOS (SVR3) + echo m88k-dolphin-sysv3 + exit ;; + M88*:*:R3*:*) + # Delta 88k system running SVR3 + echo m88k-motorola-sysv3 + exit ;; + XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3) + echo m88k-tektronix-sysv3 + exit ;; + Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD) + echo m68k-tektronix-bsd + exit ;; + *:IRIX*:*:*) + echo mips-sgi-irix`echo ${UNAME_RELEASE}|sed -e 's/-/_/g'` + exit ;; + ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX. 
+ echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id + exit ;; # Note that: echo "'`uname -s`'" gives 'AIX ' + i*86:AIX:*:*) + echo i386-ibm-aix + exit ;; + ia64:AIX:*:*) + if [ -x /usr/bin/oslevel ] ; then + IBM_REV=`/usr/bin/oslevel` + else + IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} + fi + echo ${UNAME_MACHINE}-ibm-aix${IBM_REV} + exit ;; + *:AIX:2:3) + if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + #include + + main() + { + if (!__power_pc()) + exit(1); + puts("powerpc-ibm-aix3.2.5"); + exit(0); + } +EOF + if $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` + then + echo "$SYSTEM_NAME" + else + echo rs6000-ibm-aix3.2.5 + fi + elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then + echo rs6000-ibm-aix3.2.4 + else + echo rs6000-ibm-aix3.2 + fi + exit ;; + *:AIX:*:[4567]) + IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'` + if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then + IBM_ARCH=rs6000 + else + IBM_ARCH=powerpc + fi + if [ -x /usr/bin/oslevel ] ; then + IBM_REV=`/usr/bin/oslevel` + else + IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} + fi + echo ${IBM_ARCH}-ibm-aix${IBM_REV} + exit ;; + *:AIX:*:*) + echo rs6000-ibm-aix + exit ;; + ibmrt:4.4BSD:*|romp-ibm:BSD:*) + echo romp-ibm-bsd4.4 + exit ;; + ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and + echo romp-ibm-bsd${UNAME_RELEASE} # 4.3 with uname added to + exit ;; # report: romp-ibm BSD 4.3 + *:BOSX:*:*) + echo rs6000-bull-bosx + exit ;; + DPX/2?00:B.O.S.:*:*) + echo m68k-bull-sysv3 + exit ;; + 9000/[34]??:4.3bsd:1.*:*) + echo m68k-hp-bsd + exit ;; + hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*) + echo m68k-hp-bsd4.4 + exit ;; + 9000/[34678]??:HP-UX:*:*) + HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` + case "${UNAME_MACHINE}" in + 9000/31? ) HP_ARCH=m68000 ;; + 9000/[34]?? 
) HP_ARCH=m68k ;; + 9000/[678][0-9][0-9]) + if [ -x /usr/bin/getconf ]; then + sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null` + sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null` + case "${sc_cpu_version}" in + 523) HP_ARCH="hppa1.0" ;; # CPU_PA_RISC1_0 + 528) HP_ARCH="hppa1.1" ;; # CPU_PA_RISC1_1 + 532) # CPU_PA_RISC2_0 + case "${sc_kernel_bits}" in + 32) HP_ARCH="hppa2.0n" ;; + 64) HP_ARCH="hppa2.0w" ;; + '') HP_ARCH="hppa2.0" ;; # HP-UX 10.20 + esac ;; + esac + fi + if [ "${HP_ARCH}" = "" ]; then + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + + #define _HPUX_SOURCE + #include + #include + + int main () + { + #if defined(_SC_KERNEL_BITS) + long bits = sysconf(_SC_KERNEL_BITS); + #endif + long cpu = sysconf (_SC_CPU_VERSION); + + switch (cpu) + { + case CPU_PA_RISC1_0: puts ("hppa1.0"); break; + case CPU_PA_RISC1_1: puts ("hppa1.1"); break; + case CPU_PA_RISC2_0: + #if defined(_SC_KERNEL_BITS) + switch (bits) + { + case 64: puts ("hppa2.0w"); break; + case 32: puts ("hppa2.0n"); break; + default: puts ("hppa2.0"); break; + } break; + #else /* !defined(_SC_KERNEL_BITS) */ + puts ("hppa2.0"); break; + #endif + default: puts ("hppa1.0"); break; + } + exit (0); + } +EOF + (CCOPTS= $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy` + test -z "$HP_ARCH" && HP_ARCH=hppa + fi ;; + esac + if [ ${HP_ARCH} = "hppa2.0w" ] + then + eval $set_cc_for_build + + # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating + # 32-bit code. hppa64-hp-hpux* has the same kernel and a compiler + # generating 64-bit code. 
GNU and HP use different nomenclature: + # + # $ CC_FOR_BUILD=cc ./config.guess + # => hppa2.0w-hp-hpux11.23 + # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess + # => hppa64-hp-hpux11.23 + + if echo __LP64__ | (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | + grep -q __LP64__ + then + HP_ARCH="hppa2.0w" + else + HP_ARCH="hppa64" + fi + fi + echo ${HP_ARCH}-hp-hpux${HPUX_REV} + exit ;; + ia64:HP-UX:*:*) + HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` + echo ia64-hp-hpux${HPUX_REV} + exit ;; + 3050*:HI-UX:*:*) + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + #include + int + main () + { + long cpu = sysconf (_SC_CPU_VERSION); + /* The order matters, because CPU_IS_HP_MC68K erroneously returns + true for CPU_PA_RISC1_0. CPU_IS_PA_RISC returns correct + results, however. */ + if (CPU_IS_PA_RISC (cpu)) + { + switch (cpu) + { + case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break; + case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break; + case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break; + default: puts ("hppa-hitachi-hiuxwe2"); break; + } + } + else if (CPU_IS_HP_MC68K (cpu)) + puts ("m68k-hitachi-hiuxwe2"); + else puts ("unknown-hitachi-hiuxwe2"); + exit (0); + } +EOF + $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` && + { echo "$SYSTEM_NAME"; exit; } + echo unknown-hitachi-hiuxwe2 + exit ;; + 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:* ) + echo hppa1.1-hp-bsd + exit ;; + 9000/8??:4.3bsd:*:*) + echo hppa1.0-hp-bsd + exit ;; + *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*) + echo hppa1.0-hp-mpeix + exit ;; + hp7??:OSF1:*:* | hp8?[79]:OSF1:*:* ) + echo hppa1.1-hp-osf + exit ;; + hp8??:OSF1:*:*) + echo hppa1.0-hp-osf + exit ;; + i*86:OSF1:*:*) + if [ -x /usr/sbin/sysversion ] ; then + echo ${UNAME_MACHINE}-unknown-osf1mk + else + echo ${UNAME_MACHINE}-unknown-osf1 + fi + exit ;; + parisc*:Lites*:*:*) + echo hppa1.1-hp-lites + exit ;; + C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*) + echo c1-convex-bsd + exit ;; + C2*:ConvexOS:*:* | 
convex:ConvexOS:C2*:*) + if getsysinfo -f scalar_acc + then echo c32-convex-bsd + else echo c2-convex-bsd + fi + exit ;; + C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*) + echo c34-convex-bsd + exit ;; + C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*) + echo c38-convex-bsd + exit ;; + C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*) + echo c4-convex-bsd + exit ;; + CRAY*Y-MP:*:*:*) + echo ymp-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*[A-Z]90:*:*:*) + echo ${UNAME_MACHINE}-cray-unicos${UNAME_RELEASE} \ + | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \ + -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \ + -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*TS:*:*:*) + echo t90-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*T3E:*:*:*) + echo alphaev5-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*SV1:*:*:*) + echo sv1-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + *:UNICOS/mp:*:*) + echo craynv-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*) + FUJITSU_PROC=`uname -m | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'` + FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'` + FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'` + echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" + exit ;; + 5000:UNIX_System_V:4.*:*) + FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'` + FUJITSU_REL=`echo ${UNAME_RELEASE} | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/ /_/'` + echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" + exit ;; + i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*) + echo ${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE} + exit ;; + sparc*:BSD/OS:*:*) + echo sparc-unknown-bsdi${UNAME_RELEASE} + exit ;; + *:BSD/OS:*:*) + echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE} 
+ exit ;; + *:FreeBSD:*:*) + UNAME_PROCESSOR=`/usr/bin/uname -p` + case ${UNAME_PROCESSOR} in + amd64) + echo x86_64-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;; + *) + echo ${UNAME_PROCESSOR}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;; + esac + exit ;; + i*:CYGWIN*:*) + echo ${UNAME_MACHINE}-pc-cygwin + exit ;; + *:MINGW64*:*) + echo ${UNAME_MACHINE}-pc-mingw64 + exit ;; + *:MINGW*:*) + echo ${UNAME_MACHINE}-pc-mingw32 + exit ;; + i*:MSYS*:*) + echo ${UNAME_MACHINE}-pc-msys + exit ;; + i*:windows32*:*) + # uname -m includes "-pc" on this system. + echo ${UNAME_MACHINE}-mingw32 + exit ;; + i*:PW*:*) + echo ${UNAME_MACHINE}-pc-pw32 + exit ;; + *:Interix*:*) + case ${UNAME_MACHINE} in + x86) + echo i586-pc-interix${UNAME_RELEASE} + exit ;; + authenticamd | genuineintel | EM64T) + echo x86_64-unknown-interix${UNAME_RELEASE} + exit ;; + IA64) + echo ia64-unknown-interix${UNAME_RELEASE} + exit ;; + esac ;; + [345]86:Windows_95:* | [345]86:Windows_98:* | [345]86:Windows_NT:*) + echo i${UNAME_MACHINE}-pc-mks + exit ;; + 8664:Windows_NT:*) + echo x86_64-pc-mks + exit ;; + i*:Windows_NT*:* | Pentium*:Windows_NT*:*) + # How do we know it's Interix rather than the generic POSIX subsystem? + # It also conflicts with pre-2.0 versions of AT&T UWIN. Should we + # UNAME_MACHINE based on the output of uname instead of i386? 
+ echo i586-pc-interix + exit ;; + i*:UWIN*:*) + echo ${UNAME_MACHINE}-pc-uwin + exit ;; + amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*) + echo x86_64-unknown-cygwin + exit ;; + p*:CYGWIN*:*) + echo powerpcle-unknown-cygwin + exit ;; + prep*:SunOS:5.*:*) + echo powerpcle-unknown-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + *:GNU:*:*) + # the GNU system + echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-gnu`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'` + exit ;; + *:GNU/*:*:*) + # other systems with GNU libc and userland + echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr '[A-Z]' '[a-z]'``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-gnu + exit ;; + i*86:Minix:*:*) + echo ${UNAME_MACHINE}-pc-minix + exit ;; + aarch64:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit ;; + aarch64_be:Linux:*:*) + UNAME_MACHINE=aarch64_be + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit ;; + alpha:Linux:*:*) + case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in + EV5) UNAME_MACHINE=alphaev5 ;; + EV56) UNAME_MACHINE=alphaev56 ;; + PCA56) UNAME_MACHINE=alphapca56 ;; + PCA57) UNAME_MACHINE=alphapca56 ;; + EV6) UNAME_MACHINE=alphaev6 ;; + EV67) UNAME_MACHINE=alphaev67 ;; + EV68*) UNAME_MACHINE=alphaev68 ;; + esac + objdump --private-headers /bin/sh | grep -q ld.so.1 + if test "$?" 
= 0 ; then LIBC="libc1" ; else LIBC="" ; fi + echo ${UNAME_MACHINE}-unknown-linux-gnu${LIBC} + exit ;; + arm*:Linux:*:*) + eval $set_cc_for_build + if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \ + | grep -q __ARM_EABI__ + then + echo ${UNAME_MACHINE}-unknown-linux-gnu + else + if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \ + | grep -q __ARM_PCS_VFP + then + echo ${UNAME_MACHINE}-unknown-linux-gnueabi + else + echo ${UNAME_MACHINE}-unknown-linux-gnueabihf + fi + fi + exit ;; + avr32*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit ;; + cris:Linux:*:*) + echo ${UNAME_MACHINE}-axis-linux-gnu + exit ;; + crisv32:Linux:*:*) + echo ${UNAME_MACHINE}-axis-linux-gnu + exit ;; + frv:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit ;; + hexagon:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit ;; + i*86:Linux:*:*) + LIBC=gnu + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + #ifdef __dietlibc__ + LIBC=dietlibc + #endif +EOF + eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^LIBC'` + echo "${UNAME_MACHINE}-pc-linux-${LIBC}" + exit ;; + ia64:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit ;; + m32r*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit ;; + m68*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit ;; + mips:Linux:*:* | mips64:Linux:*:*) + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + #undef CPU + #undef ${UNAME_MACHINE} + #undef ${UNAME_MACHINE}el + #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL) + CPU=${UNAME_MACHINE}el + #else + #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB) + CPU=${UNAME_MACHINE} + #else + CPU= + #endif + #endif +EOF + eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^CPU'` + test x"${CPU}" != x && { echo "${CPU}-unknown-linux-gnu"; exit; } + ;; + or32:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit ;; + padre:Linux:*:*) + echo sparc-unknown-linux-gnu + 
exit ;; + parisc64:Linux:*:* | hppa64:Linux:*:*) + echo hppa64-unknown-linux-gnu + exit ;; + parisc:Linux:*:* | hppa:Linux:*:*) + # Look for CPU level + case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in + PA7*) echo hppa1.1-unknown-linux-gnu ;; + PA8*) echo hppa2.0-unknown-linux-gnu ;; + *) echo hppa-unknown-linux-gnu ;; + esac + exit ;; + ppc64:Linux:*:*) + echo powerpc64-unknown-linux-gnu + exit ;; + ppc:Linux:*:*) + echo powerpc-unknown-linux-gnu + exit ;; + s390:Linux:*:* | s390x:Linux:*:*) + echo ${UNAME_MACHINE}-ibm-linux + exit ;; + sh64*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit ;; + sh*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit ;; + sparc:Linux:*:* | sparc64:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit ;; + tile*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit ;; + vax:Linux:*:*) + echo ${UNAME_MACHINE}-dec-linux-gnu + exit ;; + x86_64:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit ;; + xtensa*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit ;; + i*86:DYNIX/ptx:4*:*) + # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there. + # earlier versions are messed up and put the nodename in both + # sysname and nodename. + echo i386-sequent-sysv4 + exit ;; + i*86:UNIX_SV:4.2MP:2.*) + # Unixware is an offshoot of SVR4, but it has its own version + # number series starting with 2... + # I am not positive that other SVR4 systems won't match this, + # I just have to hope. -- rms. + # Use sysv4.2uw... so that sysv4* matches it. + echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION} + exit ;; + i*86:OS/2:*:*) + # If we were able to find `uname', then EMX Unix compatibility + # is probably installed. 
+ echo ${UNAME_MACHINE}-pc-os2-emx + exit ;; + i*86:XTS-300:*:STOP) + echo ${UNAME_MACHINE}-unknown-stop + exit ;; + i*86:atheos:*:*) + echo ${UNAME_MACHINE}-unknown-atheos + exit ;; + i*86:syllable:*:*) + echo ${UNAME_MACHINE}-pc-syllable + exit ;; + i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.[02]*:*) + echo i386-unknown-lynxos${UNAME_RELEASE} + exit ;; + i*86:*DOS:*:*) + echo ${UNAME_MACHINE}-pc-msdosdjgpp + exit ;; + i*86:*:4.*:* | i*86:SYSTEM_V:4.*:*) + UNAME_REL=`echo ${UNAME_RELEASE} | sed 's/\/MP$//'` + if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then + echo ${UNAME_MACHINE}-univel-sysv${UNAME_REL} + else + echo ${UNAME_MACHINE}-pc-sysv${UNAME_REL} + fi + exit ;; + i*86:*:5:[678]*) + # UnixWare 7.x, OpenUNIX and OpenServer 6. + case `/bin/uname -X | grep "^Machine"` in + *486*) UNAME_MACHINE=i486 ;; + *Pentium) UNAME_MACHINE=i586 ;; + *Pent*|*Celeron) UNAME_MACHINE=i686 ;; + esac + echo ${UNAME_MACHINE}-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION} + exit ;; + i*86:*:3.2:*) + if test -f /usr/options/cb.name; then + UNAME_REL=`sed -n 's/.*Version //p' /dev/null >/dev/null ; then + UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')` + (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486 + (/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \ + && UNAME_MACHINE=i586 + (/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \ + && UNAME_MACHINE=i686 + (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \ + && UNAME_MACHINE=i686 + echo ${UNAME_MACHINE}-pc-sco$UNAME_REL + else + echo ${UNAME_MACHINE}-pc-sysv32 + fi + exit ;; + pc:*:*:*) + # Left here for compatibility: + # uname -m prints for DJGPP always 'pc', but it prints nothing about + # the processor, so we play safe by assuming i586. + # Note: whatever this is, it MUST be the same as what config.sub + # prints for the "djgpp" host, or else GDB configury will decide that + # this is a cross-build. 
+ echo i586-pc-msdosdjgpp + exit ;; + Intel:Mach:3*:*) + echo i386-pc-mach3 + exit ;; + paragon:*:*:*) + echo i860-intel-osf1 + exit ;; + i860:*:4.*:*) # i860-SVR4 + if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then + echo i860-stardent-sysv${UNAME_RELEASE} # Stardent Vistra i860-SVR4 + else # Add other i860-SVR4 vendors below as they are discovered. + echo i860-unknown-sysv${UNAME_RELEASE} # Unknown i860-SVR4 + fi + exit ;; + mini*:CTIX:SYS*5:*) + # "miniframe" + echo m68010-convergent-sysv + exit ;; + mc68k:UNIX:SYSTEM5:3.51m) + echo m68k-convergent-sysv + exit ;; + M680?0:D-NIX:5.3:*) + echo m68k-diab-dnix + exit ;; + M68*:*:R3V[5678]*:*) + test -r /sysV68 && { echo 'm68k-motorola-sysv'; exit; } ;; + 3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0) + OS_REL='' + test -r /etc/.relid \ + && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` + /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ + && { echo i486-ncr-sysv4.3${OS_REL}; exit; } + /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ + && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;; + 3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*) + /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ + && { echo i486-ncr-sysv4; exit; } ;; + NCR*:*:4.2:* | MPRAS*:*:4.2:*) + OS_REL='.3' + test -r /etc/.relid \ + && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` + /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ + && { echo i486-ncr-sysv4.3${OS_REL}; exit; } + /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ + && { echo i586-ncr-sysv4.3${OS_REL}; exit; } + /bin/uname -p 2>/dev/null | /bin/grep pteron >/dev/null \ + && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;; + m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*) + echo m68k-unknown-lynxos${UNAME_RELEASE} + exit ;; + mc68030:UNIX_System_V:4.*:*) + echo m68k-atari-sysv4 + exit ;; + 
TSUNAMI:LynxOS:2.*:*) + echo sparc-unknown-lynxos${UNAME_RELEASE} + exit ;; + rs6000:LynxOS:2.*:*) + echo rs6000-unknown-lynxos${UNAME_RELEASE} + exit ;; + PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.[02]*:*) + echo powerpc-unknown-lynxos${UNAME_RELEASE} + exit ;; + SM[BE]S:UNIX_SV:*:*) + echo mips-dde-sysv${UNAME_RELEASE} + exit ;; + RM*:ReliantUNIX-*:*:*) + echo mips-sni-sysv4 + exit ;; + RM*:SINIX-*:*:*) + echo mips-sni-sysv4 + exit ;; + *:SINIX-*:*:*) + if uname -p 2>/dev/null >/dev/null ; then + UNAME_MACHINE=`(uname -p) 2>/dev/null` + echo ${UNAME_MACHINE}-sni-sysv4 + else + echo ns32k-sni-sysv + fi + exit ;; + PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort + # says + echo i586-unisys-sysv4 + exit ;; + *:UNIX_System_V:4*:FTX*) + # From Gerald Hewes . + # How about differentiating between stratus architectures? -djm + echo hppa1.1-stratus-sysv4 + exit ;; + *:*:*:FTX*) + # From seanf@swdc.stratus.com. + echo i860-stratus-sysv4 + exit ;; + i*86:VOS:*:*) + # From Paul.Green@stratus.com. + echo ${UNAME_MACHINE}-stratus-vos + exit ;; + *:VOS:*:*) + # From Paul.Green@stratus.com. + echo hppa1.1-stratus-vos + exit ;; + mc68*:A/UX:*:*) + echo m68k-apple-aux${UNAME_RELEASE} + exit ;; + news*:NEWS-OS:6*:*) + echo mips-sony-newsos6 + exit ;; + R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*) + if [ -d /usr/nec ]; then + echo mips-nec-sysv${UNAME_RELEASE} + else + echo mips-unknown-sysv${UNAME_RELEASE} + fi + exit ;; + BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only. + echo powerpc-be-beos + exit ;; + BeMac:BeOS:*:*) # BeOS running on Mac or Mac clone, PPC only. + echo powerpc-apple-beos + exit ;; + BePC:BeOS:*:*) # BeOS running on Intel PC compatible. + echo i586-pc-beos + exit ;; + BePC:Haiku:*:*) # Haiku running on Intel PC compatible. 
+ echo i586-pc-haiku + exit ;; + x86_64:Haiku:*:*) + echo x86_64-unknown-haiku + exit ;; + SX-4:SUPER-UX:*:*) + echo sx4-nec-superux${UNAME_RELEASE} + exit ;; + SX-5:SUPER-UX:*:*) + echo sx5-nec-superux${UNAME_RELEASE} + exit ;; + SX-6:SUPER-UX:*:*) + echo sx6-nec-superux${UNAME_RELEASE} + exit ;; + SX-7:SUPER-UX:*:*) + echo sx7-nec-superux${UNAME_RELEASE} + exit ;; + SX-8:SUPER-UX:*:*) + echo sx8-nec-superux${UNAME_RELEASE} + exit ;; + SX-8R:SUPER-UX:*:*) + echo sx8r-nec-superux${UNAME_RELEASE} + exit ;; + Power*:Rhapsody:*:*) + echo powerpc-apple-rhapsody${UNAME_RELEASE} + exit ;; + *:Rhapsody:*:*) + echo ${UNAME_MACHINE}-apple-rhapsody${UNAME_RELEASE} + exit ;; + *:Darwin:*:*) + UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown + case $UNAME_PROCESSOR in + i386) + eval $set_cc_for_build + if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then + if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \ + (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \ + grep IS_64BIT_ARCH >/dev/null + then + UNAME_PROCESSOR="x86_64" + fi + fi ;; + unknown) UNAME_PROCESSOR=powerpc ;; + esac + echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE} + exit ;; + *:procnto*:*:* | *:QNX:[0123456789]*:*) + UNAME_PROCESSOR=`uname -p` + if test "$UNAME_PROCESSOR" = "x86"; then + UNAME_PROCESSOR=i386 + UNAME_MACHINE=pc + fi + echo ${UNAME_PROCESSOR}-${UNAME_MACHINE}-nto-qnx${UNAME_RELEASE} + exit ;; + *:QNX:*:4*) + echo i386-pc-qnx + exit ;; + NEO-?:NONSTOP_KERNEL:*:*) + echo neo-tandem-nsk${UNAME_RELEASE} + exit ;; + NSE-*:NONSTOP_KERNEL:*:*) + echo nse-tandem-nsk${UNAME_RELEASE} + exit ;; + NSR-?:NONSTOP_KERNEL:*:*) + echo nsr-tandem-nsk${UNAME_RELEASE} + exit ;; + *:NonStop-UX:*:*) + echo mips-compaq-nonstopux + exit ;; + BS2000:POSIX*:*:*) + echo bs2000-siemens-sysv + exit ;; + DS/*:UNIX_System_V:*:*) + echo ${UNAME_MACHINE}-${UNAME_SYSTEM}-${UNAME_RELEASE} + exit ;; + *:Plan9:*:*) + # "uname -m" is not consistent, so use $cputype instead. 
386 + # is converted to i386 for consistency with other x86 + # operating systems. + if test "$cputype" = "386"; then + UNAME_MACHINE=i386 + else + UNAME_MACHINE="$cputype" + fi + echo ${UNAME_MACHINE}-unknown-plan9 + exit ;; + *:TOPS-10:*:*) + echo pdp10-unknown-tops10 + exit ;; + *:TENEX:*:*) + echo pdp10-unknown-tenex + exit ;; + KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*) + echo pdp10-dec-tops20 + exit ;; + XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*) + echo pdp10-xkl-tops20 + exit ;; + *:TOPS-20:*:*) + echo pdp10-unknown-tops20 + exit ;; + *:ITS:*:*) + echo pdp10-unknown-its + exit ;; + SEI:*:*:SEIUX) + echo mips-sei-seiux${UNAME_RELEASE} + exit ;; + *:DragonFly:*:*) + echo ${UNAME_MACHINE}-unknown-dragonfly`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` + exit ;; + *:*VMS:*:*) + UNAME_MACHINE=`(uname -p) 2>/dev/null` + case "${UNAME_MACHINE}" in + A*) echo alpha-dec-vms ; exit ;; + I*) echo ia64-dec-vms ; exit ;; + V*) echo vax-dec-vms ; exit ;; + esac ;; + *:XENIX:*:SysV) + echo i386-pc-xenix + exit ;; + i*86:skyos:*:*) + echo ${UNAME_MACHINE}-pc-skyos`echo ${UNAME_RELEASE}` | sed -e 's/ .*$//' + exit ;; + i*86:rdos:*:*) + echo ${UNAME_MACHINE}-pc-rdos + exit ;; + i*86:AROS:*:*) + echo ${UNAME_MACHINE}-pc-aros + exit ;; + x86_64:VMkernel:*:*) + echo ${UNAME_MACHINE}-unknown-esx + exit ;; +esac + +eval $set_cc_for_build +cat >$dummy.c < +# include +#endif +main () +{ +#if defined (sony) +#if defined (MIPSEB) + /* BFD wants "bsd" instead of "newsos". Perhaps BFD should be changed, + I don't know.... 
*/ + printf ("mips-sony-bsd\n"); exit (0); +#else +#include + printf ("m68k-sony-newsos%s\n", +#ifdef NEWSOS4 + "4" +#else + "" +#endif + ); exit (0); +#endif +#endif + +#if defined (__arm) && defined (__acorn) && defined (__unix) + printf ("arm-acorn-riscix\n"); exit (0); +#endif + +#if defined (hp300) && !defined (hpux) + printf ("m68k-hp-bsd\n"); exit (0); +#endif + +#if defined (NeXT) +#if !defined (__ARCHITECTURE__) +#define __ARCHITECTURE__ "m68k" +#endif + int version; + version=`(hostinfo | sed -n 's/.*NeXT Mach \([0-9]*\).*/\1/p') 2>/dev/null`; + if (version < 4) + printf ("%s-next-nextstep%d\n", __ARCHITECTURE__, version); + else + printf ("%s-next-openstep%d\n", __ARCHITECTURE__, version); + exit (0); +#endif + +#if defined (MULTIMAX) || defined (n16) +#if defined (UMAXV) + printf ("ns32k-encore-sysv\n"); exit (0); +#else +#if defined (CMU) + printf ("ns32k-encore-mach\n"); exit (0); +#else + printf ("ns32k-encore-bsd\n"); exit (0); +#endif +#endif +#endif + +#if defined (__386BSD__) + printf ("i386-pc-bsd\n"); exit (0); +#endif + +#if defined (sequent) +#if defined (i386) + printf ("i386-sequent-dynix\n"); exit (0); +#endif +#if defined (ns32000) + printf ("ns32k-sequent-dynix\n"); exit (0); +#endif +#endif + +#if defined (_SEQUENT_) + struct utsname un; + + uname(&un); + + if (strncmp(un.version, "V2", 2) == 0) { + printf ("i386-sequent-ptx2\n"); exit (0); + } + if (strncmp(un.version, "V1", 2) == 0) { /* XXX is V1 correct? 
*/ + printf ("i386-sequent-ptx1\n"); exit (0); + } + printf ("i386-sequent-ptx\n"); exit (0); + +#endif + +#if defined (vax) +# if !defined (ultrix) +# include +# if defined (BSD) +# if BSD == 43 + printf ("vax-dec-bsd4.3\n"); exit (0); +# else +# if BSD == 199006 + printf ("vax-dec-bsd4.3reno\n"); exit (0); +# else + printf ("vax-dec-bsd\n"); exit (0); +# endif +# endif +# else + printf ("vax-dec-bsd\n"); exit (0); +# endif +# else + printf ("vax-dec-ultrix\n"); exit (0); +# endif +#endif + +#if defined (alliant) && defined (i860) + printf ("i860-alliant-bsd\n"); exit (0); +#endif + + exit (1); +} +EOF + +$CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null && SYSTEM_NAME=`$dummy` && + { echo "$SYSTEM_NAME"; exit; } + +# Apollos put the system type in the environment. + +test -d /usr/apollo && { echo ${ISP}-apollo-${SYSTYPE}; exit; } + +# Convex versions that predate uname can use getsysinfo(1) + +if [ -x /usr/convex/getsysinfo ] +then + case `getsysinfo -f cpu_type` in + c1*) + echo c1-convex-bsd + exit ;; + c2*) + if getsysinfo -f scalar_acc + then echo c32-convex-bsd + else echo c2-convex-bsd + fi + exit ;; + c34*) + echo c34-convex-bsd + exit ;; + c38*) + echo c38-convex-bsd + exit ;; + c4*) + echo c4-convex-bsd + exit ;; + esac +fi + +cat >&2 < in order to provide the needed +information to handle your system. 
+ +config.guess timestamp = $timestamp + +uname -m = `(uname -m) 2>/dev/null || echo unknown` +uname -r = `(uname -r) 2>/dev/null || echo unknown` +uname -s = `(uname -s) 2>/dev/null || echo unknown` +uname -v = `(uname -v) 2>/dev/null || echo unknown` + +/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null` +/bin/uname -X = `(/bin/uname -X) 2>/dev/null` + +hostinfo = `(hostinfo) 2>/dev/null` +/bin/universe = `(/bin/universe) 2>/dev/null` +/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null` +/bin/arch = `(/bin/arch) 2>/dev/null` +/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null` +/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null` + +UNAME_MACHINE = ${UNAME_MACHINE} +UNAME_RELEASE = ${UNAME_RELEASE} +UNAME_SYSTEM = ${UNAME_SYSTEM} +UNAME_VERSION = ${UNAME_VERSION} +EOF + +exit 1 + +# Local variables: +# eval: (add-hook 'write-file-hooks 'time-stamp) +# time-stamp-start: "timestamp='" +# time-stamp-format: "%:y-%02m-%02d" +# time-stamp-end: "'" +# End: diff --git a/build/aux/config.sub b/build/aux/config.sub new file mode 100755 index 00000000..52f04bcd --- /dev/null +++ b/build/aux/config.sub @@ -0,0 +1,1790 @@ +#! /bin/sh +# Configuration validation subroutine script. +# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, +# 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, +# 2011, 2012, 2013 Free Software Foundation, Inc. + +timestamp='2012-12-29' + +# This file is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with this program; if not, see . +# +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that +# program. This Exception is an additional permission under section 7 +# of the GNU General Public License, version 3 ("GPLv3"). + + +# Please send patches with a ChangeLog entry to config-patches@gnu.org. +# +# Configuration subroutine to validate and canonicalize a configuration type. +# Supply the specified configuration type as an argument. +# If it is invalid, we print an error message on stderr and exit with code 1. +# Otherwise, we print the canonical config type on stdout and succeed. + +# You can get the latest version of this script from: +# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD + +# This file is supposed to be the same for all GNU packages +# and recognize all the CPU types, system types and aliases +# that are meaningful with *any* GNU software. +# Each package is responsible for reporting which valid configurations +# it does not support. The user should be able to distinguish +# a failure to support a valid configuration from a meaningless +# configuration. + +# The goal of this file is to map all the various variations of a given +# machine specification into a single specification in the form: +# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM +# or in some cases, the newer four-part form: +# CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM +# It is wrong to echo any other type of specification. + +me=`echo "$0" | sed -e 's,.*/,,'` + +usage="\ +Usage: $0 [OPTION] CPU-MFR-OPSYS + $0 [OPTION] ALIAS + +Canonicalize a configuration name. 
+ +Operation modes: + -h, --help print this help, then exit + -t, --time-stamp print date of last modification, then exit + -v, --version print version number, then exit + +Report bugs and patches to ." + +version="\ +GNU config.sub ($timestamp) + +Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, +2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, +2012, 2013 Free Software Foundation, Inc. + +This is free software; see the source for copying conditions. There is NO +warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." + +help=" +Try \`$me --help' for more information." + +# Parse command line +while test $# -gt 0 ; do + case $1 in + --time-stamp | --time* | -t ) + echo "$timestamp" ; exit ;; + --version | -v ) + echo "$version" ; exit ;; + --help | --h* | -h ) + echo "$usage"; exit ;; + -- ) # Stop option processing + shift; break ;; + - ) # Use stdin as input. + break ;; + -* ) + echo "$me: invalid option $1$help" + exit 1 ;; + + *local*) + # First pass through any local machine types. + echo $1 + exit ;; + + * ) + break ;; + esac +done + +case $# in + 0) echo "$me: missing argument$help" >&2 + exit 1;; + 1) ;; + *) echo "$me: too many arguments$help" >&2 + exit 1;; +esac + +# Separate what the user gave into CPU-COMPANY and OS or KERNEL-OS (if any). +# Here we must recognize all the valid KERNEL-OS combinations. 
+maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'` +case $maybe_os in + nto-qnx* | linux-gnu* | linux-android* | linux-dietlibc | linux-newlib* | \ + linux-musl* | linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | \ + knetbsd*-gnu* | netbsd*-gnu* | \ + kopensolaris*-gnu* | \ + storm-chaos* | os2-emx* | rtmk-nova*) + os=-$maybe_os + basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'` + ;; + android-linux) + os=-linux-android + basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`-unknown + ;; + *) + basic_machine=`echo $1 | sed 's/-[^-]*$//'` + if [ $basic_machine != $1 ] + then os=`echo $1 | sed 's/.*-/-/'` + else os=; fi + ;; +esac + +### Let's recognize common machines as not being operating systems so +### that things like config.sub decstation-3100 work. We also +### recognize some manufacturers as not being operating systems, so we +### can provide default operating systems below. +case $os in + -sun*os*) + # Prevent following clause from handling this invalid input. 
+ ;; + -dec* | -mips* | -sequent* | -encore* | -pc532* | -sgi* | -sony* | \ + -att* | -7300* | -3300* | -delta* | -motorola* | -sun[234]* | \ + -unicom* | -ibm* | -next | -hp | -isi* | -apollo | -altos* | \ + -convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\ + -c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \ + -harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \ + -apple | -axis | -knuth | -cray | -microblaze*) + os= + basic_machine=$1 + ;; + -bluegene*) + os=-cnk + ;; + -sim | -cisco | -oki | -wec | -winbond) + os= + basic_machine=$1 + ;; + -scout) + ;; + -wrs) + os=-vxworks + basic_machine=$1 + ;; + -chorusos*) + os=-chorusos + basic_machine=$1 + ;; + -chorusrdb) + os=-chorusrdb + basic_machine=$1 + ;; + -hiux*) + os=-hiuxwe2 + ;; + -sco6) + os=-sco5v6 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco5) + os=-sco3.2v5 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco4) + os=-sco3.2v4 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco3.2.[4-9]*) + os=`echo $os | sed -e 's/sco3.2./sco3.2v/'` + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco3.2v[4-9]*) + # Don't forget version if it is 3.2v4 or newer. + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco5v6*) + # Don't forget version if it is 3.2v4 or newer. 
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco*) + os=-sco3.2v2 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -udk*) + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -isc) + os=-isc2.2 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -clix*) + basic_machine=clipper-intergraph + ;; + -isc*) + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -lynx*178) + os=-lynxos178 + ;; + -lynx*5) + os=-lynxos5 + ;; + -lynx*) + os=-lynxos + ;; + -ptx*) + basic_machine=`echo $1 | sed -e 's/86-.*/86-sequent/'` + ;; + -windowsnt*) + os=`echo $os | sed -e 's/windowsnt/winnt/'` + ;; + -psos*) + os=-psos + ;; + -mint | -mint[0-9]*) + basic_machine=m68k-atari + os=-mint + ;; +esac + +# Decode aliases for certain CPU-COMPANY combinations. +case $basic_machine in + # Recognize the basic CPU types without company name. + # Some are omitted here because they have special meanings below. + 1750a | 580 \ + | a29k \ + | aarch64 | aarch64_be \ + | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \ + | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \ + | am33_2.0 \ + | arc \ + | arm | arm[bl]e | arme[lb] | armv[2-8] | armv[3-8][lb] | armv7[arm] \ + | avr | avr32 \ + | be32 | be64 \ + | bfin \ + | c4x | clipper \ + | d10v | d30v | dlx | dsp16xx \ + | epiphany \ + | fido | fr30 | frv \ + | h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \ + | hexagon \ + | i370 | i860 | i960 | ia64 \ + | ip2k | iq2000 \ + | le32 | le64 \ + | lm32 \ + | m32c | m32r | m32rle | m68000 | m68k | m88k \ + | maxq | mb | microblaze | microblazeel | mcore | mep | metag \ + | mips | mipsbe | mipseb | mipsel | mipsle \ + | mips16 \ + | mips64 | mips64el \ + | mips64octeon | mips64octeonel \ + | mips64orion | mips64orionel \ + | mips64r5900 | mips64r5900el \ + | mips64vr | mips64vrel \ + | mips64vr4100 | mips64vr4100el \ + | mips64vr4300 | mips64vr4300el \ + | mips64vr5000 | mips64vr5000el \ + | mips64vr5900 | 
mips64vr5900el \ + | mipsisa32 | mipsisa32el \ + | mipsisa32r2 | mipsisa32r2el \ + | mipsisa64 | mipsisa64el \ + | mipsisa64r2 | mipsisa64r2el \ + | mipsisa64sb1 | mipsisa64sb1el \ + | mipsisa64sr71k | mipsisa64sr71kel \ + | mipstx39 | mipstx39el \ + | mn10200 | mn10300 \ + | moxie \ + | mt \ + | msp430 \ + | nds32 | nds32le | nds32be \ + | nios | nios2 \ + | ns16k | ns32k \ + | open8 \ + | or32 \ + | pdp10 | pdp11 | pj | pjl \ + | powerpc | powerpc64 | powerpc64le | powerpcle \ + | pyramid \ + | rl78 | rx \ + | score \ + | sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[34]eb | sheb | shbe | shle | sh[1234]le | sh3ele \ + | sh64 | sh64le \ + | sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet | sparclite \ + | sparcv8 | sparcv9 | sparcv9b | sparcv9v \ + | spu \ + | tahoe | tic4x | tic54x | tic55x | tic6x | tic80 | tron \ + | ubicom32 \ + | v850 | v850e | v850e1 | v850e2 | v850es | v850e2v3 \ + | we32k \ + | x86 | xc16x | xstormy16 | xtensa \ + | z8k | z80) + basic_machine=$basic_machine-unknown + ;; + c54x) + basic_machine=tic54x-unknown + ;; + c55x) + basic_machine=tic55x-unknown + ;; + c6x) + basic_machine=tic6x-unknown + ;; + m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x | picochip) + basic_machine=$basic_machine-unknown + os=-none + ;; + m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65 | z8k) + ;; + ms1) + basic_machine=mt-unknown + ;; + + strongarm | thumb | xscale) + basic_machine=arm-unknown + ;; + xgate) + basic_machine=$basic_machine-unknown + os=-none + ;; + xscaleeb) + basic_machine=armeb-unknown + ;; + + xscaleel) + basic_machine=armel-unknown + ;; + + # We use `pc' rather than `unknown' + # because (1) that's what they normally are, and + # (2) the word "unknown" tends to confuse beginning users. + i*86 | x86_64) + basic_machine=$basic_machine-pc + ;; + # Object if more than one company name word. 
+ *-*-*) + echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 + exit 1 + ;; + # Recognize the basic CPU types with company name. + 580-* \ + | a29k-* \ + | aarch64-* | aarch64_be-* \ + | alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \ + | alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \ + | alphapca5[67]-* | alpha64pca5[67]-* | arc-* \ + | arm-* | armbe-* | armle-* | armeb-* | armv*-* \ + | avr-* | avr32-* \ + | be32-* | be64-* \ + | bfin-* | bs2000-* \ + | c[123]* | c30-* | [cjt]90-* | c4x-* \ + | clipper-* | craynv-* | cydra-* \ + | d10v-* | d30v-* | dlx-* \ + | elxsi-* \ + | f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \ + | h8300-* | h8500-* \ + | hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \ + | hexagon-* \ + | i*86-* | i860-* | i960-* | ia64-* \ + | ip2k-* | iq2000-* \ + | le32-* | le64-* \ + | lm32-* \ + | m32c-* | m32r-* | m32rle-* \ + | m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \ + | m88110-* | m88k-* | maxq-* | mcore-* | metag-* \ + | microblaze-* | microblazeel-* \ + | mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \ + | mips16-* \ + | mips64-* | mips64el-* \ + | mips64octeon-* | mips64octeonel-* \ + | mips64orion-* | mips64orionel-* \ + | mips64r5900-* | mips64r5900el-* \ + | mips64vr-* | mips64vrel-* \ + | mips64vr4100-* | mips64vr4100el-* \ + | mips64vr4300-* | mips64vr4300el-* \ + | mips64vr5000-* | mips64vr5000el-* \ + | mips64vr5900-* | mips64vr5900el-* \ + | mipsisa32-* | mipsisa32el-* \ + | mipsisa32r2-* | mipsisa32r2el-* \ + | mipsisa64-* | mipsisa64el-* \ + | mipsisa64r2-* | mipsisa64r2el-* \ + | mipsisa64sb1-* | mipsisa64sb1el-* \ + | mipsisa64sr71k-* | mipsisa64sr71kel-* \ + | mipstx39-* | mipstx39el-* \ + | mmix-* \ + | mt-* \ + | msp430-* \ + | nds32-* | nds32le-* | nds32be-* \ + | nios-* | nios2-* \ + | none-* | np1-* | ns16k-* | ns32k-* \ + | open8-* \ + | orion-* \ + | pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \ + | powerpc-* | 
powerpc64-* | powerpc64le-* | powerpcle-* \ + | pyramid-* \ + | rl78-* | romp-* | rs6000-* | rx-* \ + | sh-* | sh[1234]-* | sh[24]a-* | sh[24]aeb-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \ + | shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \ + | sparc-* | sparc64-* | sparc64b-* | sparc64v-* | sparc86x-* | sparclet-* \ + | sparclite-* \ + | sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | sv1-* | sx?-* \ + | tahoe-* \ + | tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* \ + | tile*-* \ + | tron-* \ + | ubicom32-* \ + | v850-* | v850e-* | v850e1-* | v850es-* | v850e2-* | v850e2v3-* \ + | vax-* \ + | we32k-* \ + | x86-* | x86_64-* | xc16x-* | xps100-* \ + | xstormy16-* | xtensa*-* \ + | ymp-* \ + | z8k-* | z80-*) + ;; + # Recognize the basic CPU types without company name, with glob match. + xtensa*) + basic_machine=$basic_machine-unknown + ;; + # Recognize the various machine names and aliases which stand + # for a CPU type and a company and sometimes even an OS. + 386bsd) + basic_machine=i386-unknown + os=-bsd + ;; + 3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc) + basic_machine=m68000-att + ;; + 3b*) + basic_machine=we32k-att + ;; + a29khif) + basic_machine=a29k-amd + os=-udi + ;; + abacus) + basic_machine=abacus-unknown + ;; + adobe68k) + basic_machine=m68010-adobe + os=-scout + ;; + alliant | fx80) + basic_machine=fx80-alliant + ;; + altos | altos3068) + basic_machine=m68k-altos + ;; + am29k) + basic_machine=a29k-none + os=-bsd + ;; + amd64) + basic_machine=x86_64-pc + ;; + amd64-*) + basic_machine=x86_64-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + amdahl) + basic_machine=580-amdahl + os=-sysv + ;; + amiga | amiga-*) + basic_machine=m68k-unknown + ;; + amigaos | amigados) + basic_machine=m68k-unknown + os=-amigaos + ;; + amigaunix | amix) + basic_machine=m68k-unknown + os=-sysv4 + ;; + apollo68) + basic_machine=m68k-apollo + os=-sysv + ;; + apollo68bsd) + basic_machine=m68k-apollo + os=-bsd + ;; + aros) + 
basic_machine=i386-pc + os=-aros + ;; + aux) + basic_machine=m68k-apple + os=-aux + ;; + balance) + basic_machine=ns32k-sequent + os=-dynix + ;; + blackfin) + basic_machine=bfin-unknown + os=-linux + ;; + blackfin-*) + basic_machine=bfin-`echo $basic_machine | sed 's/^[^-]*-//'` + os=-linux + ;; + bluegene*) + basic_machine=powerpc-ibm + os=-cnk + ;; + c54x-*) + basic_machine=tic54x-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + c55x-*) + basic_machine=tic55x-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + c6x-*) + basic_machine=tic6x-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + c90) + basic_machine=c90-cray + os=-unicos + ;; + cegcc) + basic_machine=arm-unknown + os=-cegcc + ;; + convex-c1) + basic_machine=c1-convex + os=-bsd + ;; + convex-c2) + basic_machine=c2-convex + os=-bsd + ;; + convex-c32) + basic_machine=c32-convex + os=-bsd + ;; + convex-c34) + basic_machine=c34-convex + os=-bsd + ;; + convex-c38) + basic_machine=c38-convex + os=-bsd + ;; + cray | j90) + basic_machine=j90-cray + os=-unicos + ;; + craynv) + basic_machine=craynv-cray + os=-unicosmp + ;; + cr16 | cr16-*) + basic_machine=cr16-unknown + os=-elf + ;; + crds | unos) + basic_machine=m68k-crds + ;; + crisv32 | crisv32-* | etraxfs*) + basic_machine=crisv32-axis + ;; + cris | cris-* | etrax*) + basic_machine=cris-axis + ;; + crx) + basic_machine=crx-unknown + os=-elf + ;; + da30 | da30-*) + basic_machine=m68k-da30 + ;; + decstation | decstation-3100 | pmax | pmax-* | pmin | dec3100 | decstatn) + basic_machine=mips-dec + ;; + decsystem10* | dec10*) + basic_machine=pdp10-dec + os=-tops10 + ;; + decsystem20* | dec20*) + basic_machine=pdp10-dec + os=-tops20 + ;; + delta | 3300 | motorola-3300 | motorola-delta \ + | 3300-motorola | delta-motorola) + basic_machine=m68k-motorola + ;; + delta88) + basic_machine=m88k-motorola + os=-sysv3 + ;; + dicos) + basic_machine=i686-pc + os=-dicos + ;; + djgpp) + basic_machine=i586-pc + os=-msdosdjgpp + ;; + dpx20 | dpx20-*) + basic_machine=rs6000-bull + 
os=-bosx + ;; + dpx2* | dpx2*-bull) + basic_machine=m68k-bull + os=-sysv3 + ;; + ebmon29k) + basic_machine=a29k-amd + os=-ebmon + ;; + elxsi) + basic_machine=elxsi-elxsi + os=-bsd + ;; + encore | umax | mmax) + basic_machine=ns32k-encore + ;; + es1800 | OSE68k | ose68k | ose | OSE) + basic_machine=m68k-ericsson + os=-ose + ;; + fx2800) + basic_machine=i860-alliant + ;; + genix) + basic_machine=ns32k-ns + ;; + gmicro) + basic_machine=tron-gmicro + os=-sysv + ;; + go32) + basic_machine=i386-pc + os=-go32 + ;; + h3050r* | hiux*) + basic_machine=hppa1.1-hitachi + os=-hiuxwe2 + ;; + h8300hms) + basic_machine=h8300-hitachi + os=-hms + ;; + h8300xray) + basic_machine=h8300-hitachi + os=-xray + ;; + h8500hms) + basic_machine=h8500-hitachi + os=-hms + ;; + harris) + basic_machine=m88k-harris + os=-sysv3 + ;; + hp300-*) + basic_machine=m68k-hp + ;; + hp300bsd) + basic_machine=m68k-hp + os=-bsd + ;; + hp300hpux) + basic_machine=m68k-hp + os=-hpux + ;; + hp3k9[0-9][0-9] | hp9[0-9][0-9]) + basic_machine=hppa1.0-hp + ;; + hp9k2[0-9][0-9] | hp9k31[0-9]) + basic_machine=m68000-hp + ;; + hp9k3[2-9][0-9]) + basic_machine=m68k-hp + ;; + hp9k6[0-9][0-9] | hp6[0-9][0-9]) + basic_machine=hppa1.0-hp + ;; + hp9k7[0-79][0-9] | hp7[0-79][0-9]) + basic_machine=hppa1.1-hp + ;; + hp9k78[0-9] | hp78[0-9]) + # FIXME: really hppa2.0-hp + basic_machine=hppa1.1-hp + ;; + hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893) + # FIXME: really hppa2.0-hp + basic_machine=hppa1.1-hp + ;; + hp9k8[0-9][13679] | hp8[0-9][13679]) + basic_machine=hppa1.1-hp + ;; + hp9k8[0-9][0-9] | hp8[0-9][0-9]) + basic_machine=hppa1.0-hp + ;; + hppa-next) + os=-nextstep3 + ;; + hppaosf) + basic_machine=hppa1.1-hp + os=-osf + ;; + hppro) + basic_machine=hppa1.1-hp + os=-proelf + ;; + i370-ibm* | ibm*) + basic_machine=i370-ibm + ;; + i*86v32) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-sysv32 + ;; + i*86v4*) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-sysv4 + ;; + 
i*86v) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-sysv + ;; + i*86sol2) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-solaris2 + ;; + i386mach) + basic_machine=i386-mach + os=-mach + ;; + i386-vsta | vsta) + basic_machine=i386-unknown + os=-vsta + ;; + iris | iris4d) + basic_machine=mips-sgi + case $os in + -irix*) + ;; + *) + os=-irix4 + ;; + esac + ;; + isi68 | isi) + basic_machine=m68k-isi + os=-sysv + ;; + m68knommu) + basic_machine=m68k-unknown + os=-linux + ;; + m68knommu-*) + basic_machine=m68k-`echo $basic_machine | sed 's/^[^-]*-//'` + os=-linux + ;; + m88k-omron*) + basic_machine=m88k-omron + ;; + magnum | m3230) + basic_machine=mips-mips + os=-sysv + ;; + merlin) + basic_machine=ns32k-utek + os=-sysv + ;; + microblaze*) + basic_machine=microblaze-xilinx + ;; + mingw64) + basic_machine=x86_64-pc + os=-mingw64 + ;; + mingw32) + basic_machine=i386-pc + os=-mingw32 + ;; + mingw32ce) + basic_machine=arm-unknown + os=-mingw32ce + ;; + miniframe) + basic_machine=m68000-convergent + ;; + *mint | -mint[0-9]* | *MiNT | *MiNT[0-9]*) + basic_machine=m68k-atari + os=-mint + ;; + mips3*-*) + basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'` + ;; + mips3*) + basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`-unknown + ;; + monitor) + basic_machine=m68k-rom68k + os=-coff + ;; + morphos) + basic_machine=powerpc-unknown + os=-morphos + ;; + msdos) + basic_machine=i386-pc + os=-msdos + ;; + ms1-*) + basic_machine=`echo $basic_machine | sed -e 's/ms1-/mt-/'` + ;; + msys) + basic_machine=i386-pc + os=-msys + ;; + mvs) + basic_machine=i370-ibm + os=-mvs + ;; + nacl) + basic_machine=le32-unknown + os=-nacl + ;; + ncr3000) + basic_machine=i486-ncr + os=-sysv4 + ;; + netbsd386) + basic_machine=i386-unknown + os=-netbsd + ;; + netwinder) + basic_machine=armv4l-rebel + os=-linux + ;; + news | news700 | news800 | news900) + basic_machine=m68k-sony + os=-newsos + ;; + news1000) + basic_machine=m68030-sony + os=-newsos + ;; + 
news-3600 | risc-news) + basic_machine=mips-sony + os=-newsos + ;; + necv70) + basic_machine=v70-nec + os=-sysv + ;; + next | m*-next ) + basic_machine=m68k-next + case $os in + -nextstep* ) + ;; + -ns2*) + os=-nextstep2 + ;; + *) + os=-nextstep3 + ;; + esac + ;; + nh3000) + basic_machine=m68k-harris + os=-cxux + ;; + nh[45]000) + basic_machine=m88k-harris + os=-cxux + ;; + nindy960) + basic_machine=i960-intel + os=-nindy + ;; + mon960) + basic_machine=i960-intel + os=-mon960 + ;; + nonstopux) + basic_machine=mips-compaq + os=-nonstopux + ;; + np1) + basic_machine=np1-gould + ;; + neo-tandem) + basic_machine=neo-tandem + ;; + nse-tandem) + basic_machine=nse-tandem + ;; + nsr-tandem) + basic_machine=nsr-tandem + ;; + op50n-* | op60c-*) + basic_machine=hppa1.1-oki + os=-proelf + ;; + openrisc | openrisc-*) + basic_machine=or32-unknown + ;; + os400) + basic_machine=powerpc-ibm + os=-os400 + ;; + OSE68000 | ose68000) + basic_machine=m68000-ericsson + os=-ose + ;; + os68k) + basic_machine=m68k-none + os=-os68k + ;; + pa-hitachi) + basic_machine=hppa1.1-hitachi + os=-hiuxwe2 + ;; + paragon) + basic_machine=i860-intel + os=-osf + ;; + parisc) + basic_machine=hppa-unknown + os=-linux + ;; + parisc-*) + basic_machine=hppa-`echo $basic_machine | sed 's/^[^-]*-//'` + os=-linux + ;; + pbd) + basic_machine=sparc-tti + ;; + pbb) + basic_machine=m68k-tti + ;; + pc532 | pc532-*) + basic_machine=ns32k-pc532 + ;; + pc98) + basic_machine=i386-pc + ;; + pc98-*) + basic_machine=i386-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pentium | p5 | k5 | k6 | nexgen | viac3) + basic_machine=i586-pc + ;; + pentiumpro | p6 | 6x86 | athlon | athlon_*) + basic_machine=i686-pc + ;; + pentiumii | pentium2 | pentiumiii | pentium3) + basic_machine=i686-pc + ;; + pentium4) + basic_machine=i786-pc + ;; + pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*) + basic_machine=i586-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pentiumpro-* | p6-* | 6x86-* | athlon-*) + basic_machine=i686-`echo 
$basic_machine | sed 's/^[^-]*-//'` + ;; + pentiumii-* | pentium2-* | pentiumiii-* | pentium3-*) + basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pentium4-*) + basic_machine=i786-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pn) + basic_machine=pn-gould + ;; + power) basic_machine=power-ibm + ;; + ppc | ppcbe) basic_machine=powerpc-unknown + ;; + ppc-* | ppcbe-*) + basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + ppcle | powerpclittle | ppc-le | powerpc-little) + basic_machine=powerpcle-unknown + ;; + ppcle-* | powerpclittle-*) + basic_machine=powerpcle-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + ppc64) basic_machine=powerpc64-unknown + ;; + ppc64-* | ppc64p7-*) basic_machine=powerpc64-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + ppc64le | powerpc64little | ppc64-le | powerpc64-little) + basic_machine=powerpc64le-unknown + ;; + ppc64le-* | powerpc64little-*) + basic_machine=powerpc64le-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + ps2) + basic_machine=i386-ibm + ;; + pw32) + basic_machine=i586-unknown + os=-pw32 + ;; + rdos | rdos64) + basic_machine=x86_64-pc + os=-rdos + ;; + rdos32) + basic_machine=i386-pc + os=-rdos + ;; + rom68k) + basic_machine=m68k-rom68k + os=-coff + ;; + rm[46]00) + basic_machine=mips-siemens + ;; + rtpc | rtpc-*) + basic_machine=romp-ibm + ;; + s390 | s390-*) + basic_machine=s390-ibm + ;; + s390x | s390x-*) + basic_machine=s390x-ibm + ;; + sa29200) + basic_machine=a29k-amd + os=-udi + ;; + sb1) + basic_machine=mipsisa64sb1-unknown + ;; + sb1el) + basic_machine=mipsisa64sb1el-unknown + ;; + sde) + basic_machine=mipsisa32-sde + os=-elf + ;; + sei) + basic_machine=mips-sei + os=-seiux + ;; + sequent) + basic_machine=i386-sequent + ;; + sh) + basic_machine=sh-hitachi + os=-hms + ;; + sh5el) + basic_machine=sh5le-unknown + ;; + sh64) + basic_machine=sh64-unknown + ;; + sparclite-wrs | simso-wrs) + basic_machine=sparclite-wrs + os=-vxworks + ;; + sps7) + basic_machine=m68k-bull + 
os=-sysv2 + ;; + spur) + basic_machine=spur-unknown + ;; + st2000) + basic_machine=m68k-tandem + ;; + stratus) + basic_machine=i860-stratus + os=-sysv4 + ;; + strongarm-* | thumb-*) + basic_machine=arm-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + sun2) + basic_machine=m68000-sun + ;; + sun2os3) + basic_machine=m68000-sun + os=-sunos3 + ;; + sun2os4) + basic_machine=m68000-sun + os=-sunos4 + ;; + sun3os3) + basic_machine=m68k-sun + os=-sunos3 + ;; + sun3os4) + basic_machine=m68k-sun + os=-sunos4 + ;; + sun4os3) + basic_machine=sparc-sun + os=-sunos3 + ;; + sun4os4) + basic_machine=sparc-sun + os=-sunos4 + ;; + sun4sol2) + basic_machine=sparc-sun + os=-solaris2 + ;; + sun3 | sun3-*) + basic_machine=m68k-sun + ;; + sun4) + basic_machine=sparc-sun + ;; + sun386 | sun386i | roadrunner) + basic_machine=i386-sun + ;; + sv1) + basic_machine=sv1-cray + os=-unicos + ;; + symmetry) + basic_machine=i386-sequent + os=-dynix + ;; + t3e) + basic_machine=alphaev5-cray + os=-unicos + ;; + t90) + basic_machine=t90-cray + os=-unicos + ;; + tile*) + basic_machine=$basic_machine-unknown + os=-linux-gnu + ;; + tx39) + basic_machine=mipstx39-unknown + ;; + tx39el) + basic_machine=mipstx39el-unknown + ;; + toad1) + basic_machine=pdp10-xkl + os=-tops20 + ;; + tower | tower-32) + basic_machine=m68k-ncr + ;; + tpf) + basic_machine=s390x-ibm + os=-tpf + ;; + udi29k) + basic_machine=a29k-amd + os=-udi + ;; + ultra3) + basic_machine=a29k-nyu + os=-sym1 + ;; + v810 | necv810) + basic_machine=v810-nec + os=-none + ;; + vaxv) + basic_machine=vax-dec + os=-sysv + ;; + vms) + basic_machine=vax-dec + os=-vms + ;; + vpp*|vx|vx-*) + basic_machine=f301-fujitsu + ;; + vxworks960) + basic_machine=i960-wrs + os=-vxworks + ;; + vxworks68) + basic_machine=m68k-wrs + os=-vxworks + ;; + vxworks29k) + basic_machine=a29k-wrs + os=-vxworks + ;; + w65*) + basic_machine=w65-wdc + os=-none + ;; + w89k-*) + basic_machine=hppa1.1-winbond + os=-proelf + ;; + xbox) + basic_machine=i686-pc + os=-mingw32 + ;; + xps | 
xps100) + basic_machine=xps100-honeywell + ;; + xscale-* | xscalee[bl]-*) + basic_machine=`echo $basic_machine | sed 's/^xscale/arm/'` + ;; + ymp) + basic_machine=ymp-cray + os=-unicos + ;; + z8k-*-coff) + basic_machine=z8k-unknown + os=-sim + ;; + z80-*-coff) + basic_machine=z80-unknown + os=-sim + ;; + none) + basic_machine=none-none + os=-none + ;; + +# Here we handle the default manufacturer of certain CPU types. It is in +# some cases the only manufacturer, in others, it is the most popular. + w89k) + basic_machine=hppa1.1-winbond + ;; + op50n) + basic_machine=hppa1.1-oki + ;; + op60c) + basic_machine=hppa1.1-oki + ;; + romp) + basic_machine=romp-ibm + ;; + mmix) + basic_machine=mmix-knuth + ;; + rs6000) + basic_machine=rs6000-ibm + ;; + vax) + basic_machine=vax-dec + ;; + pdp10) + # there are many clones, so DEC is not a safe bet + basic_machine=pdp10-unknown + ;; + pdp11) + basic_machine=pdp11-dec + ;; + we32k) + basic_machine=we32k-att + ;; + sh[1234] | sh[24]a | sh[24]aeb | sh[34]eb | sh[1234]le | sh[23]ele) + basic_machine=sh-unknown + ;; + sparc | sparcv8 | sparcv9 | sparcv9b | sparcv9v) + basic_machine=sparc-sun + ;; + cydra) + basic_machine=cydra-cydrome + ;; + orion) + basic_machine=orion-highlevel + ;; + orion105) + basic_machine=clipper-highlevel + ;; + mac | mpw | mac-mpw) + basic_machine=m68k-apple + ;; + pmac | pmac-mpw) + basic_machine=powerpc-apple + ;; + *-unknown) + # Make sure to match an already-canonicalized machine name. + ;; + *) + echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 + exit 1 + ;; +esac + +# Here we canonicalize certain aliases for manufacturers. +case $basic_machine in + *-digital*) + basic_machine=`echo $basic_machine | sed 's/digital.*/dec/'` + ;; + *-commodore*) + basic_machine=`echo $basic_machine | sed 's/commodore.*/cbm/'` + ;; + *) + ;; +esac + +# Decode manufacturer-specific aliases for certain operating systems. 
+ +if [ x"$os" != x"" ] +then +case $os in + # First match some system type aliases + # that might get confused with valid system types. + # -solaris* is a basic system type, with this one exception. + -auroraux) + os=-auroraux + ;; + -solaris1 | -solaris1.*) + os=`echo $os | sed -e 's|solaris1|sunos4|'` + ;; + -solaris) + os=-solaris2 + ;; + -svr4*) + os=-sysv4 + ;; + -unixware*) + os=-sysv4.2uw + ;; + -gnu/linux*) + os=`echo $os | sed -e 's|gnu/linux|linux-gnu|'` + ;; + # First accept the basic system types. + # The portable systems comes first. + # Each alternative MUST END IN A *, to match a version number. + # -sysv* is not here because it comes later, after sysvr4. + -gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \ + | -*vms* | -sco* | -esix* | -isc* | -aix* | -cnk* | -sunos | -sunos[34]*\ + | -hpux* | -unos* | -osf* | -luna* | -dgux* | -auroraux* | -solaris* \ + | -sym* | -kopensolaris* \ + | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \ + | -aos* | -aros* \ + | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \ + | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \ + | -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* \ + | -bitrig* | -openbsd* | -solidbsd* \ + | -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \ + | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \ + | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \ + | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \ + | -chorusos* | -chorusrdb* | -cegcc* \ + | -cygwin* | -msys* | -pe* | -psos* | -moss* | -proelf* | -rtems* \ + | -mingw32* | -mingw64* | -linux-gnu* | -linux-android* \ + | -linux-newlib* | -linux-musl* | -linux-uclibc* \ + | -uxpv* | -beos* | -mpeix* | -udk* \ + | -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \ + | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \ + | -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \ + | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \ + | 
-morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \ + | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly* \ + | -skyos* | -haiku* | -rdos* | -toppers* | -drops* | -es*) + # Remember, each alternative MUST END IN *, to match a version number. + ;; + -qnx*) + case $basic_machine in + x86-* | i*86-*) + ;; + *) + os=-nto$os + ;; + esac + ;; + -nto-qnx*) + ;; + -nto*) + os=`echo $os | sed -e 's|nto|nto-qnx|'` + ;; + -sim | -es1800* | -hms* | -xray | -os68k* | -none* | -v88r* \ + | -windows* | -osx | -abug | -netware* | -os9* | -beos* | -haiku* \ + | -macos* | -mpw* | -magic* | -mmixware* | -mon960* | -lnews*) + ;; + -mac*) + os=`echo $os | sed -e 's|mac|macos|'` + ;; + -linux-dietlibc) + os=-linux-dietlibc + ;; + -linux*) + os=`echo $os | sed -e 's|linux|linux-gnu|'` + ;; + -sunos5*) + os=`echo $os | sed -e 's|sunos5|solaris2|'` + ;; + -sunos6*) + os=`echo $os | sed -e 's|sunos6|solaris3|'` + ;; + -opened*) + os=-openedition + ;; + -os400*) + os=-os400 + ;; + -wince*) + os=-wince + ;; + -osfrose*) + os=-osfrose + ;; + -osf*) + os=-osf + ;; + -utek*) + os=-bsd + ;; + -dynix*) + os=-bsd + ;; + -acis*) + os=-aos + ;; + -atheos*) + os=-atheos + ;; + -syllable*) + os=-syllable + ;; + -386bsd) + os=-bsd + ;; + -ctix* | -uts*) + os=-sysv + ;; + -nova*) + os=-rtmk-nova + ;; + -ns2 ) + os=-nextstep2 + ;; + -nsk*) + os=-nsk + ;; + # Preserve the version number of sinix5. + -sinix5.*) + os=`echo $os | sed -e 's|sinix|sysv|'` + ;; + -sinix*) + os=-sysv4 + ;; + -tpf*) + os=-tpf + ;; + -triton*) + os=-sysv3 + ;; + -oss*) + os=-sysv3 + ;; + -svr4) + os=-sysv4 + ;; + -svr3) + os=-sysv3 + ;; + -sysvr4) + os=-sysv4 + ;; + # This must come after -sysvr4. 
+ -sysv*) + ;; + -ose*) + os=-ose + ;; + -es1800*) + os=-ose + ;; + -xenix) + os=-xenix + ;; + -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) + os=-mint + ;; + -aros*) + os=-aros + ;; + -kaos*) + os=-kaos + ;; + -zvmoe) + os=-zvmoe + ;; + -dicos*) + os=-dicos + ;; + -nacl*) + ;; + -none) + ;; + *) + # Get rid of the `-' at the beginning of $os. + os=`echo $os | sed 's/[^-]*-//'` + echo Invalid configuration \`$1\': system \`$os\' not recognized 1>&2 + exit 1 + ;; +esac +else + +# Here we handle the default operating systems that come with various machines. +# The value should be what the vendor currently ships out the door with their +# machine or put another way, the most popular os provided with the machine. + +# Note that if you're going to try to match "-MANUFACTURER" here (say, +# "-sun"), then you have to tell the case statement up towards the top +# that MANUFACTURER isn't an operating system. Otherwise, code above +# will signal an error saying that MANUFACTURER isn't an operating +# system, and we'll never get to this point. + +case $basic_machine in + score-*) + os=-elf + ;; + spu-*) + os=-elf + ;; + *-acorn) + os=-riscix1.2 + ;; + arm*-rebel) + os=-linux + ;; + arm*-semi) + os=-aout + ;; + c4x-* | tic4x-*) + os=-coff + ;; + hexagon-*) + os=-elf + ;; + tic54x-*) + os=-coff + ;; + tic55x-*) + os=-coff + ;; + tic6x-*) + os=-coff + ;; + # This must come before the *-dec entry. + pdp10-*) + os=-tops20 + ;; + pdp11-*) + os=-none + ;; + *-dec | vax-*) + os=-ultrix4.2 + ;; + m68*-apollo) + os=-domain + ;; + i386-sun) + os=-sunos4.0.2 + ;; + m68000-sun) + os=-sunos3 + ;; + m68*-cisco) + os=-aout + ;; + mep-*) + os=-elf + ;; + mips*-cisco) + os=-elf + ;; + mips*-*) + os=-elf + ;; + or32-*) + os=-coff + ;; + *-tti) # must be before sparc entry or we get the wrong os. 
+ os=-sysv3 + ;; + sparc-* | *-sun) + os=-sunos4.1.1 + ;; + *-be) + os=-beos + ;; + *-haiku) + os=-haiku + ;; + *-ibm) + os=-aix + ;; + *-knuth) + os=-mmixware + ;; + *-wec) + os=-proelf + ;; + *-winbond) + os=-proelf + ;; + *-oki) + os=-proelf + ;; + *-hp) + os=-hpux + ;; + *-hitachi) + os=-hiux + ;; + i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent) + os=-sysv + ;; + *-cbm) + os=-amigaos + ;; + *-dg) + os=-dgux + ;; + *-dolphin) + os=-sysv3 + ;; + m68k-ccur) + os=-rtu + ;; + m88k-omron*) + os=-luna + ;; + *-next ) + os=-nextstep + ;; + *-sequent) + os=-ptx + ;; + *-crds) + os=-unos + ;; + *-ns) + os=-genix + ;; + i370-*) + os=-mvs + ;; + *-next) + os=-nextstep3 + ;; + *-gould) + os=-sysv + ;; + *-highlevel) + os=-bsd + ;; + *-encore) + os=-bsd + ;; + *-sgi) + os=-irix + ;; + *-siemens) + os=-sysv4 + ;; + *-masscomp) + os=-rtu + ;; + f30[01]-fujitsu | f700-fujitsu) + os=-uxpv + ;; + *-rom68k) + os=-coff + ;; + *-*bug) + os=-coff + ;; + *-apple) + os=-macos + ;; + *-atari*) + os=-mint + ;; + *) + os=-none + ;; +esac +fi + +# Here we handle the case where we know the os, and the CPU type, but not the +# manufacturer. We pick the logical manufacturer. 
+vendor=unknown +case $basic_machine in + *-unknown) + case $os in + -riscix*) + vendor=acorn + ;; + -sunos*) + vendor=sun + ;; + -cnk*|-aix*) + vendor=ibm + ;; + -beos*) + vendor=be + ;; + -hpux*) + vendor=hp + ;; + -mpeix*) + vendor=hp + ;; + -hiux*) + vendor=hitachi + ;; + -unos*) + vendor=crds + ;; + -dgux*) + vendor=dg + ;; + -luna*) + vendor=omron + ;; + -genix*) + vendor=ns + ;; + -mvs* | -opened*) + vendor=ibm + ;; + -os400*) + vendor=ibm + ;; + -ptx*) + vendor=sequent + ;; + -tpf*) + vendor=ibm + ;; + -vxsim* | -vxworks* | -windiss*) + vendor=wrs + ;; + -aux*) + vendor=apple + ;; + -hms*) + vendor=hitachi + ;; + -mpw* | -macos*) + vendor=apple + ;; + -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) + vendor=atari + ;; + -vos*) + vendor=stratus + ;; + esac + basic_machine=`echo $basic_machine | sed "s/unknown/$vendor/"` + ;; +esac + +echo $basic_machine$os +exit + +# Local variables: +# eval: (add-hook 'write-file-hooks 'time-stamp) +# time-stamp-start: "timestamp='" +# time-stamp-format: "%:y-%02m-%02d" +# time-stamp-end: "'" +# End: diff --git a/build/aux/depcomp b/build/aux/depcomp new file mode 100755 index 00000000..4ebd5b3a --- /dev/null +++ b/build/aux/depcomp @@ -0,0 +1,791 @@ +#! /bin/sh +# depcomp - compile a program generating dependencies as side-effects + +scriptversion=2013-05-30.07; # UTC + +# Copyright (C) 1999-2013 Free Software Foundation, Inc. + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. + +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+ +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that program. + +# Originally written by Alexandre Oliva . + +case $1 in + '') + echo "$0: No command. Try '$0 --help' for more information." 1>&2 + exit 1; + ;; + -h | --h*) + cat <<\EOF +Usage: depcomp [--help] [--version] PROGRAM [ARGS] + +Run PROGRAMS ARGS to compile a file, generating dependencies +as side-effects. + +Environment variables: + depmode Dependency tracking mode. + source Source file read by 'PROGRAMS ARGS'. + object Object file output by 'PROGRAMS ARGS'. + DEPDIR directory where to store dependencies. + depfile Dependency file to output. + tmpdepfile Temporary file to use when outputting dependencies. + libtool Whether libtool is used (yes/no). + +Report bugs to . +EOF + exit $? + ;; + -v | --v*) + echo "depcomp $scriptversion" + exit $? + ;; +esac + +# Get the directory component of the given path, and save it in the +# global variables '$dir'. Note that this directory component will +# be either empty or ending with a '/' character. This is deliberate. +set_dir_from () +{ + case $1 in + */*) dir=`echo "$1" | sed -e 's|/[^/]*$|/|'`;; + *) dir=;; + esac +} + +# Get the suffix-stripped basename of the given path, and save it the +# global variable '$base'. +set_base_from () +{ + base=`echo "$1" | sed -e 's|^.*/||' -e 's/\.[^.]*$//'` +} + +# If no dependency file was actually created by the compiler invocation, +# we still have to create a dummy depfile, to avoid errors with the +# Makefile "include basename.Plo" scheme. +make_dummy_depfile () +{ + echo "#dummy" > "$depfile" +} + +# Factor out some common post-processing of the generated depfile. +# Requires the auxiliary global variable '$tmpdepfile' to be set. 
+aix_post_process_depfile () +{ + # If the compiler actually managed to produce a dependency file, + # post-process it. + if test -f "$tmpdepfile"; then + # Each line is of the form 'foo.o: dependency.h'. + # Do two passes, one to just change these to + # $object: dependency.h + # and one to simply output + # dependency.h: + # which is needed to avoid the deleted-header problem. + { sed -e "s,^.*\.[$lower]*:,$object:," < "$tmpdepfile" + sed -e "s,^.*\.[$lower]*:[$tab ]*,," -e 's,$,:,' < "$tmpdepfile" + } > "$depfile" + rm -f "$tmpdepfile" + else + make_dummy_depfile + fi +} + +# A tabulation character. +tab=' ' +# A newline character. +nl=' +' +# Character ranges might be problematic outside the C locale. +# These definitions help. +upper=ABCDEFGHIJKLMNOPQRSTUVWXYZ +lower=abcdefghijklmnopqrstuvwxyz +digits=0123456789 +alpha=${upper}${lower} + +if test -z "$depmode" || test -z "$source" || test -z "$object"; then + echo "depcomp: Variables source, object and depmode must be set" 1>&2 + exit 1 +fi + +# Dependencies for sub/bar.o or sub/bar.obj go into sub/.deps/bar.Po. +depfile=${depfile-`echo "$object" | + sed 's|[^\\/]*$|'${DEPDIR-.deps}'/&|;s|\.\([^.]*\)$|.P\1|;s|Pobj$|Po|'`} +tmpdepfile=${tmpdepfile-`echo "$depfile" | sed 's/\.\([^.]*\)$/.T\1/'`} + +rm -f "$tmpdepfile" + +# Avoid interferences from the environment. +gccflag= dashmflag= + +# Some modes work just like other modes, but use different flags. We +# parameterize here, but still list the modes in the big case below, +# to make depend.m4 easier to write. Note that we *cannot* use a case +# here, because this file can only contain one case statement. +if test "$depmode" = hp; then + # HP compiler uses -M and no extra arg. + gccflag=-M + depmode=gcc +fi + +if test "$depmode" = dashXmstdout; then + # This is just like dashmstdout with a different argument. 
+ dashmflag=-xM + depmode=dashmstdout +fi + +cygpath_u="cygpath -u -f -" +if test "$depmode" = msvcmsys; then + # This is just like msvisualcpp but w/o cygpath translation. + # Just convert the backslash-escaped backslashes to single forward + # slashes to satisfy depend.m4 + cygpath_u='sed s,\\\\,/,g' + depmode=msvisualcpp +fi + +if test "$depmode" = msvc7msys; then + # This is just like msvc7 but w/o cygpath translation. + # Just convert the backslash-escaped backslashes to single forward + # slashes to satisfy depend.m4 + cygpath_u='sed s,\\\\,/,g' + depmode=msvc7 +fi + +if test "$depmode" = xlc; then + # IBM C/C++ Compilers xlc/xlC can output gcc-like dependency information. + gccflag=-qmakedep=gcc,-MF + depmode=gcc +fi + +case "$depmode" in +gcc3) +## gcc 3 implements dependency tracking that does exactly what +## we want. Yay! Note: for some reason libtool 1.4 doesn't like +## it if -MD -MP comes after the -MF stuff. Hmm. +## Unfortunately, FreeBSD c89 acceptance of flags depends upon +## the command line argument order; so add the flags where they +## appear in depend2.am. Note that the slowdown incurred here +## affects only configure: in makefiles, %FASTDEP% shortcuts this. + for arg + do + case $arg in + -c) set fnord "$@" -MT "$object" -MD -MP -MF "$tmpdepfile" "$arg" ;; + *) set fnord "$@" "$arg" ;; + esac + shift # fnord + shift # $arg + done + "$@" + stat=$? + if test $stat -ne 0; then + rm -f "$tmpdepfile" + exit $stat + fi + mv "$tmpdepfile" "$depfile" + ;; + +gcc) +## Note that this doesn't just cater to obsosete pre-3.x GCC compilers. +## but also to in-use compilers like IMB xlc/xlC and the HP C compiler. +## (see the conditional assignment to $gccflag above). +## There are various ways to get dependency output from gcc. Here's +## why we pick this rather obscure method: +## - Don't want to use -MD because we'd like the dependencies to end +## up in a subdir. Having to rename by hand is ugly. 
+## (We might end up doing this anyway to support other compilers.) +## - The DEPENDENCIES_OUTPUT environment variable makes gcc act like +## -MM, not -M (despite what the docs say). Also, it might not be +## supported by the other compilers which use the 'gcc' depmode. +## - Using -M directly means running the compiler twice (even worse +## than renaming). + if test -z "$gccflag"; then + gccflag=-MD, + fi + "$@" -Wp,"$gccflag$tmpdepfile" + stat=$? + if test $stat -ne 0; then + rm -f "$tmpdepfile" + exit $stat + fi + rm -f "$depfile" + echo "$object : \\" > "$depfile" + # The second -e expression handles DOS-style file names with drive + # letters. + sed -e 's/^[^:]*: / /' \ + -e 's/^['$alpha']:\/[^:]*: / /' < "$tmpdepfile" >> "$depfile" +## This next piece of magic avoids the "deleted header file" problem. +## The problem is that when a header file which appears in a .P file +## is deleted, the dependency causes make to die (because there is +## typically no way to rebuild the header). We avoid this by adding +## dummy dependencies for each header file. Too bad gcc doesn't do +## this for us directly. +## Some versions of gcc put a space before the ':'. On the theory +## that the space means something, we add a space to the output as +## well. hp depmode also adds that space, but also prefixes the VPATH +## to the object. Take care to not repeat it in the output. +## Some versions of the HPUX 10.20 sed can't process this invocation +## correctly. Breaking it into two sed invocations is a workaround. + tr ' ' "$nl" < "$tmpdepfile" \ + | sed -e 's/^\\$//' -e '/^$/d' -e "s|.*$object$||" -e '/:$/d' \ + | sed -e 's/$/ :/' >> "$depfile" + rm -f "$tmpdepfile" + ;; + +hp) + # This case exists only to let depend.m4 do its work. It works by + # looking at the text of this script. This case will never be run, + # since it is checked for above. 
+ exit 1 + ;; + +sgi) + if test "$libtool" = yes; then + "$@" "-Wp,-MDupdate,$tmpdepfile" + else + "$@" -MDupdate "$tmpdepfile" + fi + stat=$? + if test $stat -ne 0; then + rm -f "$tmpdepfile" + exit $stat + fi + rm -f "$depfile" + + if test -f "$tmpdepfile"; then # yes, the sourcefile depend on other files + echo "$object : \\" > "$depfile" + # Clip off the initial element (the dependent). Don't try to be + # clever and replace this with sed code, as IRIX sed won't handle + # lines with more than a fixed number of characters (4096 in + # IRIX 6.2 sed, 8192 in IRIX 6.5). We also remove comment lines; + # the IRIX cc adds comments like '#:fec' to the end of the + # dependency line. + tr ' ' "$nl" < "$tmpdepfile" \ + | sed -e 's/^.*\.o://' -e 's/#.*$//' -e '/^$/ d' \ + | tr "$nl" ' ' >> "$depfile" + echo >> "$depfile" + # The second pass generates a dummy entry for each header file. + tr ' ' "$nl" < "$tmpdepfile" \ + | sed -e 's/^.*\.o://' -e 's/#.*$//' -e '/^$/ d' -e 's/$/:/' \ + >> "$depfile" + else + make_dummy_depfile + fi + rm -f "$tmpdepfile" + ;; + +xlc) + # This case exists only to let depend.m4 do its work. It works by + # looking at the text of this script. This case will never be run, + # since it is checked for above. + exit 1 + ;; + +aix) + # The C for AIX Compiler uses -M and outputs the dependencies + # in a .u file. In older versions, this file always lives in the + # current directory. Also, the AIX compiler puts '$object:' at the + # start of each line; $object doesn't have directory information. + # Version 6 uses the directory in both cases. + set_dir_from "$object" + set_base_from "$object" + if test "$libtool" = yes; then + tmpdepfile1=$dir$base.u + tmpdepfile2=$base.u + tmpdepfile3=$dir.libs/$base.u + "$@" -Wc,-M + else + tmpdepfile1=$dir$base.u + tmpdepfile2=$dir$base.u + tmpdepfile3=$dir$base.u + "$@" -M + fi + stat=$? 
+ if test $stat -ne 0; then + rm -f "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" + exit $stat + fi + + for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" + do + test -f "$tmpdepfile" && break + done + aix_post_process_depfile + ;; + +tcc) + # tcc (Tiny C Compiler) understand '-MD -MF file' since version 0.9.26 + # FIXME: That version still under development at the moment of writing. + # Make that this statement remains true also for stable, released + # versions. + # It will wrap lines (doesn't matter whether long or short) with a + # trailing '\', as in: + # + # foo.o : \ + # foo.c \ + # foo.h \ + # + # It will put a trailing '\' even on the last line, and will use leading + # spaces rather than leading tabs (at least since its commit 0394caf7 + # "Emit spaces for -MD"). + "$@" -MD -MF "$tmpdepfile" + stat=$? + if test $stat -ne 0; then + rm -f "$tmpdepfile" + exit $stat + fi + rm -f "$depfile" + # Each non-empty line is of the form 'foo.o : \' or ' dep.h \'. + # We have to change lines of the first kind to '$object: \'. + sed -e "s|.*:|$object :|" < "$tmpdepfile" > "$depfile" + # And for each line of the second kind, we have to emit a 'dep.h:' + # dummy dependency, to avoid the deleted-header problem. + sed -n -e 's|^ *\(.*\) *\\$|\1:|p' < "$tmpdepfile" >> "$depfile" + rm -f "$tmpdepfile" + ;; + +## The order of this option in the case statement is important, since the +## shell code in configure will try each of these formats in the order +## listed in this file. A plain '-MD' option would be understood by many +## compilers, so we must ensure this comes after the gcc and icc options. +pgcc) + # Portland's C compiler understands '-MD'. + # Will always output deps to 'file.d' where file is the root name of the + # source file under compilation, even if file resides in a subdirectory. + # The object file name does not affect the name of the '.d' file. 
+ # pgcc 10.2 will output + # foo.o: sub/foo.c sub/foo.h + # and will wrap long lines using '\' : + # foo.o: sub/foo.c ... \ + # sub/foo.h ... \ + # ... + set_dir_from "$object" + # Use the source, not the object, to determine the base name, since + # that's sadly what pgcc will do too. + set_base_from "$source" + tmpdepfile=$base.d + + # For projects that build the same source file twice into different object + # files, the pgcc approach of using the *source* file root name can cause + # problems in parallel builds. Use a locking strategy to avoid stomping on + # the same $tmpdepfile. + lockdir=$base.d-lock + trap " + echo '$0: caught signal, cleaning up...' >&2 + rmdir '$lockdir' + exit 1 + " 1 2 13 15 + numtries=100 + i=$numtries + while test $i -gt 0; do + # mkdir is a portable test-and-set. + if mkdir "$lockdir" 2>/dev/null; then + # This process acquired the lock. + "$@" -MD + stat=$? + # Release the lock. + rmdir "$lockdir" + break + else + # If the lock is being held by a different process, wait + # until the winning process is done or we timeout. + while test -d "$lockdir" && test $i -gt 0; do + sleep 1 + i=`expr $i - 1` + done + fi + i=`expr $i - 1` + done + trap - 1 2 13 15 + if test $i -le 0; then + echo "$0: failed to acquire lock after $numtries attempts" >&2 + echo "$0: check lockdir '$lockdir'" >&2 + exit 1 + fi + + if test $stat -ne 0; then + rm -f "$tmpdepfile" + exit $stat + fi + rm -f "$depfile" + # Each line is of the form `foo.o: dependent.h', + # or `foo.o: dep1.h dep2.h \', or ` dep3.h dep4.h \'. + # Do two passes, one to just change these to + # `$object: dependent.h' and one to simply `dependent.h:'. + sed "s,^[^:]*:,$object :," < "$tmpdepfile" > "$depfile" + # Some versions of the HPUX 10.20 sed can't process this invocation + # correctly. Breaking it into two sed invocations is a workaround. 
+ sed 's,^[^:]*: \(.*\)$,\1,;s/^\\$//;/^$/d;/:$/d' < "$tmpdepfile" \ + | sed -e 's/$/ :/' >> "$depfile" + rm -f "$tmpdepfile" + ;; + +hp2) + # The "hp" stanza above does not work with aCC (C++) and HP's ia64 + # compilers, which have integrated preprocessors. The correct option + # to use with these is +Maked; it writes dependencies to a file named + # 'foo.d', which lands next to the object file, wherever that + # happens to be. + # Much of this is similar to the tru64 case; see comments there. + set_dir_from "$object" + set_base_from "$object" + if test "$libtool" = yes; then + tmpdepfile1=$dir$base.d + tmpdepfile2=$dir.libs/$base.d + "$@" -Wc,+Maked + else + tmpdepfile1=$dir$base.d + tmpdepfile2=$dir$base.d + "$@" +Maked + fi + stat=$? + if test $stat -ne 0; then + rm -f "$tmpdepfile1" "$tmpdepfile2" + exit $stat + fi + + for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" + do + test -f "$tmpdepfile" && break + done + if test -f "$tmpdepfile"; then + sed -e "s,^.*\.[$lower]*:,$object:," "$tmpdepfile" > "$depfile" + # Add 'dependent.h:' lines. + sed -ne '2,${ + s/^ *// + s/ \\*$// + s/$/:/ + p + }' "$tmpdepfile" >> "$depfile" + else + make_dummy_depfile + fi + rm -f "$tmpdepfile" "$tmpdepfile2" + ;; + +tru64) + # The Tru64 compiler uses -MD to generate dependencies as a side + # effect. 'cc -MD -o foo.o ...' puts the dependencies into 'foo.o.d'. + # At least on Alpha/Redhat 6.1, Compaq CCC V6.2-504 seems to put + # dependencies in 'foo.d' instead, so we check for that too. + # Subdirectories are respected. + set_dir_from "$object" + set_base_from "$object" + + if test "$libtool" = yes; then + # Libtool generates 2 separate objects for the 2 libraries. These + # two compilations output dependencies in $dir.libs/$base.o.d and + # in $dir$base.o.d. We have to check for both files, because + # one of the two compilations can be disabled. 
We should prefer + # $dir$base.o.d over $dir.libs/$base.o.d because the latter is + # automatically cleaned when .libs/ is deleted, while ignoring + # the former would cause a distcleancheck panic. + tmpdepfile1=$dir$base.o.d # libtool 1.5 + tmpdepfile2=$dir.libs/$base.o.d # Likewise. + tmpdepfile3=$dir.libs/$base.d # Compaq CCC V6.2-504 + "$@" -Wc,-MD + else + tmpdepfile1=$dir$base.d + tmpdepfile2=$dir$base.d + tmpdepfile3=$dir$base.d + "$@" -MD + fi + + stat=$? + if test $stat -ne 0; then + rm -f "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" + exit $stat + fi + + for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" + do + test -f "$tmpdepfile" && break + done + # Same post-processing that is required for AIX mode. + aix_post_process_depfile + ;; + +msvc7) + if test "$libtool" = yes; then + showIncludes=-Wc,-showIncludes + else + showIncludes=-showIncludes + fi + "$@" $showIncludes > "$tmpdepfile" + stat=$? + grep -v '^Note: including file: ' "$tmpdepfile" + if test $stat -ne 0; then + rm -f "$tmpdepfile" + exit $stat + fi + rm -f "$depfile" + echo "$object : \\" > "$depfile" + # The first sed program below extracts the file names and escapes + # backslashes for cygpath. The second sed program outputs the file + # name when reading, but also accumulates all include files in the + # hold buffer in order to output them again at the end. This only + # works with sed implementations that can handle large buffers. + sed < "$tmpdepfile" -n ' +/^Note: including file: *\(.*\)/ { + s//\1/ + s/\\/\\\\/g + p +}' | $cygpath_u | sort -u | sed -n ' +s/ /\\ /g +s/\(.*\)/'"$tab"'\1 \\/p +s/.\(.*\) \\/\1:/ +H +$ { + s/.*/'"$tab"'/ + G + p +}' >> "$depfile" + echo >> "$depfile" # make sure the fragment doesn't end with a backslash + rm -f "$tmpdepfile" + ;; + +msvc7msys) + # This case exists only to let depend.m4 do its work. It works by + # looking at the text of this script. This case will never be run, + # since it is checked for above. 
+ exit 1 + ;; + +#nosideeffect) + # This comment above is used by automake to tell side-effect + # dependency tracking mechanisms from slower ones. + +dashmstdout) + # Important note: in order to support this mode, a compiler *must* + # always write the preprocessed file to stdout, regardless of -o. + "$@" || exit $? + + # Remove the call to Libtool. + if test "$libtool" = yes; then + while test "X$1" != 'X--mode=compile'; do + shift + done + shift + fi + + # Remove '-o $object'. + IFS=" " + for arg + do + case $arg in + -o) + shift + ;; + $object) + shift + ;; + *) + set fnord "$@" "$arg" + shift # fnord + shift # $arg + ;; + esac + done + + test -z "$dashmflag" && dashmflag=-M + # Require at least two characters before searching for ':' + # in the target name. This is to cope with DOS-style filenames: + # a dependency such as 'c:/foo/bar' could be seen as target 'c' otherwise. + "$@" $dashmflag | + sed "s|^[$tab ]*[^:$tab ][^:][^:]*:[$tab ]*|$object: |" > "$tmpdepfile" + rm -f "$depfile" + cat < "$tmpdepfile" > "$depfile" + # Some versions of the HPUX 10.20 sed can't process this sed invocation + # correctly. Breaking it into two sed invocations is a workaround. + tr ' ' "$nl" < "$tmpdepfile" \ + | sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' \ + | sed -e 's/$/ :/' >> "$depfile" + rm -f "$tmpdepfile" + ;; + +dashXmstdout) + # This case only exists to satisfy depend.m4. It is never actually + # run, as this mode is specially recognized in the preamble. + exit 1 + ;; + +makedepend) + "$@" || exit $? + # Remove any Libtool call + if test "$libtool" = yes; then + while test "X$1" != 'X--mode=compile'; do + shift + done + shift + fi + # X makedepend + shift + cleared=no eat=no + for arg + do + case $cleared in + no) + set ""; shift + cleared=yes ;; + esac + if test $eat = yes; then + eat=no + continue + fi + case "$arg" in + -D*|-I*) + set fnord "$@" "$arg"; shift ;; + # Strip any option that makedepend may not understand. 
Remove + # the object too, otherwise makedepend will parse it as a source file. + -arch) + eat=yes ;; + -*|$object) + ;; + *) + set fnord "$@" "$arg"; shift ;; + esac + done + obj_suffix=`echo "$object" | sed 's/^.*\././'` + touch "$tmpdepfile" + ${MAKEDEPEND-makedepend} -o"$obj_suffix" -f"$tmpdepfile" "$@" + rm -f "$depfile" + # makedepend may prepend the VPATH from the source file name to the object. + # No need to regex-escape $object, excess matching of '.' is harmless. + sed "s|^.*\($object *:\)|\1|" "$tmpdepfile" > "$depfile" + # Some versions of the HPUX 10.20 sed can't process the last invocation + # correctly. Breaking it into two sed invocations is a workaround. + sed '1,2d' "$tmpdepfile" \ + | tr ' ' "$nl" \ + | sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' \ + | sed -e 's/$/ :/' >> "$depfile" + rm -f "$tmpdepfile" "$tmpdepfile".bak + ;; + +cpp) + # Important note: in order to support this mode, a compiler *must* + # always write the preprocessed file to stdout. + "$@" || exit $? + + # Remove the call to Libtool. + if test "$libtool" = yes; then + while test "X$1" != 'X--mode=compile'; do + shift + done + shift + fi + + # Remove '-o $object'. + IFS=" " + for arg + do + case $arg in + -o) + shift + ;; + $object) + shift + ;; + *) + set fnord "$@" "$arg" + shift # fnord + shift # $arg + ;; + esac + done + + "$@" -E \ + | sed -n -e '/^# [0-9][0-9]* "\([^"]*\)".*/ s:: \1 \\:p' \ + -e '/^#line [0-9][0-9]* "\([^"]*\)".*/ s:: \1 \\:p' \ + | sed '$ s: \\$::' > "$tmpdepfile" + rm -f "$depfile" + echo "$object : \\" > "$depfile" + cat < "$tmpdepfile" >> "$depfile" + sed < "$tmpdepfile" '/^$/d;s/^ //;s/ \\$//;s/$/ :/' >> "$depfile" + rm -f "$tmpdepfile" + ;; + +msvisualcpp) + # Important note: in order to support this mode, a compiler *must* + # always write the preprocessed file to stdout. + "$@" || exit $? + + # Remove the call to Libtool. 
+ if test "$libtool" = yes; then + while test "X$1" != 'X--mode=compile'; do + shift + done + shift + fi + + IFS=" " + for arg + do + case "$arg" in + -o) + shift + ;; + $object) + shift + ;; + "-Gm"|"/Gm"|"-Gi"|"/Gi"|"-ZI"|"/ZI") + set fnord "$@" + shift + shift + ;; + *) + set fnord "$@" "$arg" + shift + shift + ;; + esac + done + "$@" -E 2>/dev/null | + sed -n '/^#line [0-9][0-9]* "\([^"]*\)"/ s::\1:p' | $cygpath_u | sort -u > "$tmpdepfile" + rm -f "$depfile" + echo "$object : \\" > "$depfile" + sed < "$tmpdepfile" -n -e 's% %\\ %g' -e '/^\(.*\)$/ s::'"$tab"'\1 \\:p' >> "$depfile" + echo "$tab" >> "$depfile" + sed < "$tmpdepfile" -n -e 's% %\\ %g' -e '/^\(.*\)$/ s::\1\::p' >> "$depfile" + rm -f "$tmpdepfile" + ;; + +msvcmsys) + # This case exists only to let depend.m4 do its work. It works by + # looking at the text of this script. This case will never be run, + # since it is checked for above. + exit 1 + ;; + +none) + exec "$@" + ;; + +*) + echo "Unknown depmode $depmode" 1>&2 + exit 1 + ;; +esac + +exit 0 + +# Local Variables: +# mode: shell-script +# sh-indentation: 2 +# eval: (add-hook 'write-file-hooks 'time-stamp) +# time-stamp-start: "scriptversion=" +# time-stamp-format: "%:y-%02m-%02d.%02H" +# time-stamp-time-zone: "UTC" +# time-stamp-end: "; # UTC" +# End: diff --git a/build/aux/install-sh b/build/aux/install-sh new file mode 100755 index 00000000..377bb868 --- /dev/null +++ b/build/aux/install-sh @@ -0,0 +1,527 @@ +#!/bin/sh +# install - install a program, script, or datafile + +scriptversion=2011-11-20.07; # UTC + +# This originates from X11R5 (mit/util/scripts/install.sh), which was +# later released in X11R6 (xc/config/util/install.sh) with the +# following copyright and license. 
+# +# Copyright (C) 1994 X Consortium +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to +# deal in the Software without restriction, including without limitation the +# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +# sell copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# X CONSORTIUM BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN +# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNEC- +# TION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +# +# Except as contained in this notice, the name of the X Consortium shall not +# be used in advertising or otherwise to promote the sale, use or other deal- +# ings in this Software without prior written authorization from the X Consor- +# tium. +# +# +# FSF changes to this file are in the public domain. +# +# Calling this script install-sh is preferred over install.sh, to prevent +# 'make' implicit rules from creating a file called install from it +# when there is no Makefile. +# +# This script is compatible with the BSD install script, but was written +# from scratch. + +nl=' +' +IFS=" "" $nl" + +# set DOITPROG to echo to test this script + +# Don't use :- since 4.3BSD and earlier shells don't like it. +doit=${DOITPROG-} +if test -z "$doit"; then + doit_exec=exec +else + doit_exec=$doit +fi + +# Put in absolute file names if you don't have them in your path; +# or use environment vars. 
+ +chgrpprog=${CHGRPPROG-chgrp} +chmodprog=${CHMODPROG-chmod} +chownprog=${CHOWNPROG-chown} +cmpprog=${CMPPROG-cmp} +cpprog=${CPPROG-cp} +mkdirprog=${MKDIRPROG-mkdir} +mvprog=${MVPROG-mv} +rmprog=${RMPROG-rm} +stripprog=${STRIPPROG-strip} + +posix_glob='?' +initialize_posix_glob=' + test "$posix_glob" != "?" || { + if (set -f) 2>/dev/null; then + posix_glob= + else + posix_glob=: + fi + } +' + +posix_mkdir= + +# Desired mode of installed file. +mode=0755 + +chgrpcmd= +chmodcmd=$chmodprog +chowncmd= +mvcmd=$mvprog +rmcmd="$rmprog -f" +stripcmd= + +src= +dst= +dir_arg= +dst_arg= + +copy_on_change=false +no_target_directory= + +usage="\ +Usage: $0 [OPTION]... [-T] SRCFILE DSTFILE + or: $0 [OPTION]... SRCFILES... DIRECTORY + or: $0 [OPTION]... -t DIRECTORY SRCFILES... + or: $0 [OPTION]... -d DIRECTORIES... + +In the 1st form, copy SRCFILE to DSTFILE. +In the 2nd and 3rd, copy all SRCFILES to DIRECTORY. +In the 4th, create DIRECTORIES. + +Options: + --help display this help and exit. + --version display version info and exit. + + -c (ignored) + -C install only if different (preserve the last data modification time) + -d create directories instead of installing files. + -g GROUP $chgrpprog installed files to GROUP. + -m MODE $chmodprog installed files to MODE. + -o USER $chownprog installed files to USER. + -s $stripprog installed files. + -t DIRECTORY install into DIRECTORY. + -T report an error if DSTFILE is a directory. 
+ +Environment variables override the default commands: + CHGRPPROG CHMODPROG CHOWNPROG CMPPROG CPPROG MKDIRPROG MVPROG + RMPROG STRIPPROG +" + +while test $# -ne 0; do + case $1 in + -c) ;; + + -C) copy_on_change=true;; + + -d) dir_arg=true;; + + -g) chgrpcmd="$chgrpprog $2" + shift;; + + --help) echo "$usage"; exit $?;; + + -m) mode=$2 + case $mode in + *' '* | *' '* | *' +'* | *'*'* | *'?'* | *'['*) + echo "$0: invalid mode: $mode" >&2 + exit 1;; + esac + shift;; + + -o) chowncmd="$chownprog $2" + shift;; + + -s) stripcmd=$stripprog;; + + -t) dst_arg=$2 + # Protect names problematic for 'test' and other utilities. + case $dst_arg in + -* | [=\(\)!]) dst_arg=./$dst_arg;; + esac + shift;; + + -T) no_target_directory=true;; + + --version) echo "$0 $scriptversion"; exit $?;; + + --) shift + break;; + + -*) echo "$0: invalid option: $1" >&2 + exit 1;; + + *) break;; + esac + shift +done + +if test $# -ne 0 && test -z "$dir_arg$dst_arg"; then + # When -d is used, all remaining arguments are directories to create. + # When -t is used, the destination is already specified. + # Otherwise, the last argument is the destination. Remove it from $@. + for arg + do + if test -n "$dst_arg"; then + # $@ is not empty: it contains at least $arg. + set fnord "$@" "$dst_arg" + shift # fnord + fi + shift # arg + dst_arg=$arg + # Protect names problematic for 'test' and other utilities. + case $dst_arg in + -* | [=\(\)!]) dst_arg=./$dst_arg;; + esac + done +fi + +if test $# -eq 0; then + if test -z "$dir_arg"; then + echo "$0: no input file specified." >&2 + exit 1 + fi + # It's OK to call 'install-sh -d' without argument. + # This can happen when creating conditional directories. + exit 0 +fi + +if test -z "$dir_arg"; then + do_exit='(exit $ret); exit $ret' + trap "ret=129; $do_exit" 1 + trap "ret=130; $do_exit" 2 + trap "ret=141; $do_exit" 13 + trap "ret=143; $do_exit" 15 + + # Set umask so as not to create temps with too-generous modes. 
+ # However, 'strip' requires both read and write access to temps. + case $mode in + # Optimize common cases. + *644) cp_umask=133;; + *755) cp_umask=22;; + + *[0-7]) + if test -z "$stripcmd"; then + u_plus_rw= + else + u_plus_rw='% 200' + fi + cp_umask=`expr '(' 777 - $mode % 1000 ')' $u_plus_rw`;; + *) + if test -z "$stripcmd"; then + u_plus_rw= + else + u_plus_rw=,u+rw + fi + cp_umask=$mode$u_plus_rw;; + esac +fi + +for src +do + # Protect names problematic for 'test' and other utilities. + case $src in + -* | [=\(\)!]) src=./$src;; + esac + + if test -n "$dir_arg"; then + dst=$src + dstdir=$dst + test -d "$dstdir" + dstdir_status=$? + else + + # Waiting for this to be detected by the "$cpprog $src $dsttmp" command + # might cause directories to be created, which would be especially bad + # if $src (and thus $dsttmp) contains '*'. + if test ! -f "$src" && test ! -d "$src"; then + echo "$0: $src does not exist." >&2 + exit 1 + fi + + if test -z "$dst_arg"; then + echo "$0: no destination specified." >&2 + exit 1 + fi + dst=$dst_arg + + # If destination is a directory, append the input filename; won't work + # if double slashes aren't ignored. + if test -d "$dst"; then + if test -n "$no_target_directory"; then + echo "$0: $dst_arg: Is a directory" >&2 + exit 1 + fi + dstdir=$dst + dst=$dstdir/`basename "$src"` + dstdir_status=0 + else + # Prefer dirname, but fall back on a substitute if dirname fails. + dstdir=` + (dirname "$dst") 2>/dev/null || + expr X"$dst" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$dst" : 'X\(//\)[^/]' \| \ + X"$dst" : 'X\(//\)$' \| \ + X"$dst" : 'X\(/\)' \| . 2>/dev/null || + echo X"$dst" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q' + ` + + test -d "$dstdir" + dstdir_status=$? 
+ fi + fi + + obsolete_mkdir_used=false + + if test $dstdir_status != 0; then + case $posix_mkdir in + '') + # Create intermediate dirs using mode 755 as modified by the umask. + # This is like FreeBSD 'install' as of 1997-10-28. + umask=`umask` + case $stripcmd.$umask in + # Optimize common cases. + *[2367][2367]) mkdir_umask=$umask;; + .*0[02][02] | .[02][02] | .[02]) mkdir_umask=22;; + + *[0-7]) + mkdir_umask=`expr $umask + 22 \ + - $umask % 100 % 40 + $umask % 20 \ + - $umask % 10 % 4 + $umask % 2 + `;; + *) mkdir_umask=$umask,go-w;; + esac + + # With -d, create the new directory with the user-specified mode. + # Otherwise, rely on $mkdir_umask. + if test -n "$dir_arg"; then + mkdir_mode=-m$mode + else + mkdir_mode= + fi + + posix_mkdir=false + case $umask in + *[123567][0-7][0-7]) + # POSIX mkdir -p sets u+wx bits regardless of umask, which + # is incompatible with FreeBSD 'install' when (umask & 300) != 0. + ;; + *) + tmpdir=${TMPDIR-/tmp}/ins$RANDOM-$$ + trap 'ret=$?; rmdir "$tmpdir/d" "$tmpdir" 2>/dev/null; exit $ret' 0 + + if (umask $mkdir_umask && + exec $mkdirprog $mkdir_mode -p -- "$tmpdir/d") >/dev/null 2>&1 + then + if test -z "$dir_arg" || { + # Check for POSIX incompatibilities with -m. + # HP-UX 11.23 and IRIX 6.5 mkdir -m -p sets group- or + # other-writable bit of parent directory when it shouldn't. + # FreeBSD 6.1 mkdir -m -p sets mode of existing directory. + ls_ld_tmpdir=`ls -ld "$tmpdir"` + case $ls_ld_tmpdir in + d????-?r-*) different_mode=700;; + d????-?--*) different_mode=755;; + *) false;; + esac && + $mkdirprog -m$different_mode -p -- "$tmpdir" && { + ls_ld_tmpdir_1=`ls -ld "$tmpdir"` + test "$ls_ld_tmpdir" = "$ls_ld_tmpdir_1" + } + } + then posix_mkdir=: + fi + rmdir "$tmpdir/d" "$tmpdir" + else + # Remove any dirs left behind by ancient mkdir implementations. 
+ rmdir ./$mkdir_mode ./-p ./-- 2>/dev/null + fi + trap '' 0;; + esac;; + esac + + if + $posix_mkdir && ( + umask $mkdir_umask && + $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir" + ) + then : + else + + # The umask is ridiculous, or mkdir does not conform to POSIX, + # or it failed possibly due to a race condition. Create the + # directory the slow way, step by step, checking for races as we go. + + case $dstdir in + /*) prefix='/';; + [-=\(\)!]*) prefix='./';; + *) prefix='';; + esac + + eval "$initialize_posix_glob" + + oIFS=$IFS + IFS=/ + $posix_glob set -f + set fnord $dstdir + shift + $posix_glob set +f + IFS=$oIFS + + prefixes= + + for d + do + test X"$d" = X && continue + + prefix=$prefix$d + if test -d "$prefix"; then + prefixes= + else + if $posix_mkdir; then + (umask=$mkdir_umask && + $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir") && break + # Don't fail if two instances are running concurrently. + test -d "$prefix" || exit 1 + else + case $prefix in + *\'*) qprefix=`echo "$prefix" | sed "s/'/'\\\\\\\\''/g"`;; + *) qprefix=$prefix;; + esac + prefixes="$prefixes '$qprefix'" + fi + fi + prefix=$prefix/ + done + + if test -n "$prefixes"; then + # Don't fail if two instances are running concurrently. + (umask $mkdir_umask && + eval "\$doit_exec \$mkdirprog $prefixes") || + test -d "$dstdir" || exit 1 + obsolete_mkdir_used=true + fi + fi + fi + + if test -n "$dir_arg"; then + { test -z "$chowncmd" || $doit $chowncmd "$dst"; } && + { test -z "$chgrpcmd" || $doit $chgrpcmd "$dst"; } && + { test "$obsolete_mkdir_used$chowncmd$chgrpcmd" = false || + test -z "$chmodcmd" || $doit $chmodcmd $mode "$dst"; } || exit 1 + else + + # Make a couple of temp file names in the proper directory. + dsttmp=$dstdir/_inst.$$_ + rmtmp=$dstdir/_rm.$$_ + + # Trap to clean up those temp files at exit. + trap 'ret=$?; rm -f "$dsttmp" "$rmtmp" && exit $ret' 0 + + # Copy the file name to the temp name. 
+ (umask $cp_umask && $doit_exec $cpprog "$src" "$dsttmp") && + + # and set any options; do chmod last to preserve setuid bits. + # + # If any of these fail, we abort the whole thing. If we want to + # ignore errors from any of these, just make sure not to ignore + # errors from the above "$doit $cpprog $src $dsttmp" command. + # + { test -z "$chowncmd" || $doit $chowncmd "$dsttmp"; } && + { test -z "$chgrpcmd" || $doit $chgrpcmd "$dsttmp"; } && + { test -z "$stripcmd" || $doit $stripcmd "$dsttmp"; } && + { test -z "$chmodcmd" || $doit $chmodcmd $mode "$dsttmp"; } && + + # If -C, don't bother to copy if it wouldn't change the file. + if $copy_on_change && + old=`LC_ALL=C ls -dlL "$dst" 2>/dev/null` && + new=`LC_ALL=C ls -dlL "$dsttmp" 2>/dev/null` && + + eval "$initialize_posix_glob" && + $posix_glob set -f && + set X $old && old=:$2:$4:$5:$6 && + set X $new && new=:$2:$4:$5:$6 && + $posix_glob set +f && + + test "$old" = "$new" && + $cmpprog "$dst" "$dsttmp" >/dev/null 2>&1 + then + rm -f "$dsttmp" + else + # Rename the file to the real destination. + $doit $mvcmd -f "$dsttmp" "$dst" 2>/dev/null || + + # The rename failed, perhaps because mv can't rename something else + # to itself, or perhaps because mv is so ancient that it does not + # support -f. + { + # Now remove or move aside any old file at destination location. + # We try this two ways since rm can't unlink itself on some + # systems and the destination file might be busy for other + # reasons. In this case, the final cleanup might fail but the new + # file should still install successfully. + { + test ! -f "$dst" || + $doit $rmcmd -f "$dst" 2>/dev/null || + { $doit $mvcmd -f "$dst" "$rmtmp" 2>/dev/null && + { $doit $rmcmd -f "$rmtmp" 2>/dev/null; :; } + } || + { echo "$0: cannot unlink or rename $dst" >&2 + (exit 1); exit 1 + } + } && + + # Now rename the file to the real destination. 
+ $doit $mvcmd "$dsttmp" "$dst" + } + fi || exit 1 + + trap '' 0 + fi +done + +# Local variables: +# eval: (add-hook 'write-file-hooks 'time-stamp) +# time-stamp-start: "scriptversion=" +# time-stamp-format: "%:y-%02m-%02d.%02H" +# time-stamp-time-zone: "UTC" +# time-stamp-end: "; # UTC" +# End: diff --git a/build/aux/ltmain.sh b/build/aux/ltmain.sh new file mode 100644 index 00000000..63ae69dc --- /dev/null +++ b/build/aux/ltmain.sh @@ -0,0 +1,9655 @@ + +# libtool (GNU libtool) 2.4.2 +# Written by Gordon Matzigkeit , 1996 + +# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, 2006, +# 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc. +# This is free software; see the source for copying conditions. There is NO +# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + +# GNU Libtool is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# As a special exception to the GNU General Public License, +# if you distribute this file as part of a program or library that +# is built using GNU Libtool, you may include this file under the +# same distribution terms that you use for the rest of that program. +# +# GNU Libtool is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with GNU Libtool; see the file COPYING. If not, a copy +# can be downloaded from http://www.gnu.org/licenses/gpl.html, +# or obtained by writing to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + +# Usage: $progname [OPTION]... [MODE-ARG]... 
+# +# Provide generalized library-building support services. +# +# --config show all configuration variables +# --debug enable verbose shell tracing +# -n, --dry-run display commands without modifying any files +# --features display basic configuration information and exit +# --mode=MODE use operation mode MODE +# --preserve-dup-deps don't remove duplicate dependency libraries +# --quiet, --silent don't print informational messages +# --no-quiet, --no-silent +# print informational messages (default) +# --no-warn don't display warning messages +# --tag=TAG use configuration variables from tag TAG +# -v, --verbose print more informational messages than default +# --no-verbose don't print the extra informational messages +# --version print version information +# -h, --help, --help-all print short, long, or detailed help message +# +# MODE must be one of the following: +# +# clean remove files from the build directory +# compile compile a source file into a libtool object +# execute automatically set library path, then run a program +# finish complete the installation of libtool libraries +# install install libraries or executables +# link create a library or an executable +# uninstall remove libraries from an installed directory +# +# MODE-ARGS vary depending on the MODE. When passed as first option, +# `--mode=MODE' may be abbreviated as `MODE' or a unique abbreviation of that. +# Try `$progname --help --mode=MODE' for a more detailed description of MODE. +# +# When reporting a bug, please describe a test case to reproduce it and +# include the following information: +# +# host-triplet: $host +# shell: $SHELL +# compiler: $LTCC +# compiler flags: $LTCFLAGS +# linker: $LD (gnu? $with_gnu_ld) +# $progname: (GNU libtool) 2.4.2 +# automake: $automake_version +# autoconf: $autoconf_version +# +# Report bugs to . +# GNU libtool home page: . +# General help using GNU software: . 
+ +PROGRAM=libtool +PACKAGE=libtool +VERSION=2.4.2 +TIMESTAMP="" +package_revision=1.3337 + +# Be Bourne compatible +if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then + emulate sh + NULLCMD=: + # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. + alias -g '${1+"$@"}'='"$@"' + setopt NO_GLOB_SUBST +else + case `(set -o) 2>/dev/null` in *posix*) set -o posix;; esac +fi +BIN_SH=xpg4; export BIN_SH # for Tru64 +DUALCASE=1; export DUALCASE # for MKS sh + +# A function that is used when there is no print builtin or printf. +func_fallback_echo () +{ + eval 'cat <<_LTECHO_EOF +$1 +_LTECHO_EOF' +} + +# NLS nuisances: We save the old values to restore during execute mode. +lt_user_locale= +lt_safe_locale= +for lt_var in LANG LANGUAGE LC_ALL LC_CTYPE LC_COLLATE LC_MESSAGES +do + eval "if test \"\${$lt_var+set}\" = set; then + save_$lt_var=\$$lt_var + $lt_var=C + export $lt_var + lt_user_locale=\"$lt_var=\\\$save_\$lt_var; \$lt_user_locale\" + lt_safe_locale=\"$lt_var=C; \$lt_safe_locale\" + fi" +done +LC_ALL=C +LANGUAGE=C +export LANGUAGE LC_ALL + +$lt_unset CDPATH + + +# Work around backward compatibility issue on IRIX 6.5. On IRIX 6.4+, sh +# is ksh but when the shell is invoked as "sh" and the current value of +# the _XPG environment variable is not equal to 1 (one), the special +# positional parameter $0, within a function call, is the name of the +# function. +progpath="$0" + + + +: ${CP="cp -f"} +test "${ECHO+set}" = set || ECHO=${as_echo-'printf %s\n'} +: ${MAKE="make"} +: ${MKDIR="mkdir"} +: ${MV="mv -f"} +: ${RM="rm -f"} +: ${SHELL="${CONFIG_SHELL-/bin/sh}"} +: ${Xsed="$SED -e 1s/^X//"} + +# Global variables: +EXIT_SUCCESS=0 +EXIT_FAILURE=1 +EXIT_MISMATCH=63 # $? = 63 is used to indicate version mismatch to missing. +EXIT_SKIP=77 # $? = 77 is used to indicate a skipped test to automake. 
+
+exit_status=$EXIT_SUCCESS
+
+# Make sure IFS has a sensible default
+lt_nl='
+'
+IFS=" 	$lt_nl"
+
+dirname="s,/[^/]*$,,"
+basename="s,^.*/,,"
+
+# func_dirname file append nondir_replacement
+# Compute the dirname of FILE. If nonempty, add APPEND to the result,
+# otherwise set result to NONDIR_REPLACEMENT.
+func_dirname ()
+{
+  func_dirname_result=`$ECHO "${1}" | $SED "$dirname"`
+  if test "X$func_dirname_result" = "X${1}"; then
+    func_dirname_result="${3}"
+  else
+    func_dirname_result="$func_dirname_result${2}"
+  fi
+} # func_dirname may be replaced by extended shell implementation
+
+
+# func_basename file
+func_basename ()
+{
+  func_basename_result=`$ECHO "${1}" | $SED "$basename"`
+} # func_basename may be replaced by extended shell implementation
+
+
+# func_dirname_and_basename file append nondir_replacement
+# perform func_basename and func_dirname in a single function
+# call:
+#   dirname:  Compute the dirname of FILE. If nonempty,
+#             add APPEND to the result, otherwise set result
+#             to NONDIR_REPLACEMENT.
+#             value returned in "$func_dirname_result"
+#   basename: Compute filename of FILE.
+#             value returned in "$func_basename_result"
+# Implementation must be kept synchronized with func_dirname
+# and func_basename. For efficiency, we do not delegate to
+# those functions but instead duplicate the functionality here.
+func_dirname_and_basename ()
+{
+  # Extract subdirectory from the argument.
+  func_dirname_result=`$ECHO "${1}" | $SED -e "$dirname"`
+  if test "X$func_dirname_result" = "X${1}"; then
+    func_dirname_result="${3}"
+  else
+    func_dirname_result="$func_dirname_result${2}"
+  fi
+  func_basename_result=`$ECHO "${1}" | $SED -e "$basename"`
+} # func_dirname_and_basename may be replaced by extended shell implementation
+
+
+# func_stripname prefix suffix name
+# strip PREFIX and SUFFIX off of NAME.
+# PREFIX and SUFFIX must not contain globbing or regex special +# characters, hashes, percent signs, but SUFFIX may contain a leading +# dot (in which case that matches only a dot). +# func_strip_suffix prefix name +func_stripname () +{ + case ${2} in + .*) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%\\\\${2}\$%%"`;; + *) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%${2}\$%%"`;; + esac +} # func_stripname may be replaced by extended shell implementation + + +# These SED scripts presuppose an absolute path with a trailing slash. +pathcar='s,^/\([^/]*\).*$,\1,' +pathcdr='s,^/[^/]*,,' +removedotparts=':dotsl + s@/\./@/@g + t dotsl + s,/\.$,/,' +collapseslashes='s@/\{1,\}@/@g' +finalslash='s,/*$,/,' + +# func_normal_abspath PATH +# Remove doubled-up and trailing slashes, "." path components, +# and cancel out any ".." path components in PATH after making +# it an absolute path. +# value returned in "$func_normal_abspath_result" +func_normal_abspath () +{ + # Start from root dir and reassemble the path. + func_normal_abspath_result= + func_normal_abspath_tpath=$1 + func_normal_abspath_altnamespace= + case $func_normal_abspath_tpath in + "") + # Empty path, that just means $cwd. + func_stripname '' '/' "`pwd`" + func_normal_abspath_result=$func_stripname_result + return + ;; + # The next three entries are used to spot a run of precisely + # two leading slashes without using negated character classes; + # we take advantage of case's first-match behaviour. + ///*) + # Unusual form of absolute path, do nothing. + ;; + //*) + # Not necessarily an ordinary path; POSIX reserves leading '//' + # and for example Cygwin uses it to access remote file shares + # over CIFS/SMB, so we conserve a leading double slash if found. + func_normal_abspath_altnamespace=/ + ;; + /*) + # Absolute path, do nothing. + ;; + *) + # Relative path, prepend $cwd. 
+ func_normal_abspath_tpath=`pwd`/$func_normal_abspath_tpath + ;; + esac + # Cancel out all the simple stuff to save iterations. We also want + # the path to end with a slash for ease of parsing, so make sure + # there is one (and only one) here. + func_normal_abspath_tpath=`$ECHO "$func_normal_abspath_tpath" | $SED \ + -e "$removedotparts" -e "$collapseslashes" -e "$finalslash"` + while :; do + # Processed it all yet? + if test "$func_normal_abspath_tpath" = / ; then + # If we ascended to the root using ".." the result may be empty now. + if test -z "$func_normal_abspath_result" ; then + func_normal_abspath_result=/ + fi + break + fi + func_normal_abspath_tcomponent=`$ECHO "$func_normal_abspath_tpath" | $SED \ + -e "$pathcar"` + func_normal_abspath_tpath=`$ECHO "$func_normal_abspath_tpath" | $SED \ + -e "$pathcdr"` + # Figure out what to do with it + case $func_normal_abspath_tcomponent in + "") + # Trailing empty path component, ignore it. + ;; + ..) + # Parent dir; strip last assembled component from result. + func_dirname "$func_normal_abspath_result" + func_normal_abspath_result=$func_dirname_result + ;; + *) + # Actual path component, append it. + func_normal_abspath_result=$func_normal_abspath_result/$func_normal_abspath_tcomponent + ;; + esac + done + # Restore leading double-slash if one was found on entry. + func_normal_abspath_result=$func_normal_abspath_altnamespace$func_normal_abspath_result +} + +# func_relative_path SRCDIR DSTDIR +# generates a relative path from SRCDIR to DSTDIR, with a trailing +# slash if non-empty, suitable for immediately appending a filename +# without needing to append a separator. 
+# value returned in "$func_relative_path_result" +func_relative_path () +{ + func_relative_path_result= + func_normal_abspath "$1" + func_relative_path_tlibdir=$func_normal_abspath_result + func_normal_abspath "$2" + func_relative_path_tbindir=$func_normal_abspath_result + + # Ascend the tree starting from libdir + while :; do + # check if we have found a prefix of bindir + case $func_relative_path_tbindir in + $func_relative_path_tlibdir) + # found an exact match + func_relative_path_tcancelled= + break + ;; + $func_relative_path_tlibdir*) + # found a matching prefix + func_stripname "$func_relative_path_tlibdir" '' "$func_relative_path_tbindir" + func_relative_path_tcancelled=$func_stripname_result + if test -z "$func_relative_path_result"; then + func_relative_path_result=. + fi + break + ;; + *) + func_dirname $func_relative_path_tlibdir + func_relative_path_tlibdir=${func_dirname_result} + if test "x$func_relative_path_tlibdir" = x ; then + # Have to descend all the way to the root! + func_relative_path_result=../$func_relative_path_result + func_relative_path_tcancelled=$func_relative_path_tbindir + break + fi + func_relative_path_result=../$func_relative_path_result + ;; + esac + done + + # Now calculate path; take care to avoid doubling-up slashes. + func_stripname '' '/' "$func_relative_path_result" + func_relative_path_result=$func_stripname_result + func_stripname '/' '/' "$func_relative_path_tcancelled" + if test "x$func_stripname_result" != x ; then + func_relative_path_result=${func_relative_path_result}/${func_stripname_result} + fi + + # Normalisation. If bindir is libdir, return empty string, + # else relative path ending with a slash; either way, target + # file name can be directly appended. + if test ! 
-z "$func_relative_path_result"; then + func_stripname './' '' "$func_relative_path_result/" + func_relative_path_result=$func_stripname_result + fi +} + +# The name of this program: +func_dirname_and_basename "$progpath" +progname=$func_basename_result + +# Make sure we have an absolute path for reexecution: +case $progpath in + [\\/]*|[A-Za-z]:\\*) ;; + *[\\/]*) + progdir=$func_dirname_result + progdir=`cd "$progdir" && pwd` + progpath="$progdir/$progname" + ;; + *) + save_IFS="$IFS" + IFS=${PATH_SEPARATOR-:} + for progdir in $PATH; do + IFS="$save_IFS" + test -x "$progdir/$progname" && break + done + IFS="$save_IFS" + test -n "$progdir" || progdir=`pwd` + progpath="$progdir/$progname" + ;; +esac + +# Sed substitution that helps us do robust quoting. It backslashifies +# metacharacters that are still active within double-quoted strings. +Xsed="${SED}"' -e 1s/^X//' +sed_quote_subst='s/\([`"$\\]\)/\\\1/g' + +# Same as above, but do not quote variable references. +double_quote_subst='s/\(["`\\]\)/\\\1/g' + +# Sed substitution that turns a string into a regex matching for the +# string literally. +sed_make_literal_regex='s,[].[^$\\*\/],\\&,g' + +# Sed substitution that converts a w32 file name or path +# which contains forward slashes, into one that contains +# (escaped) backslashes. A very naive implementation. +lt_sed_naive_backslashify='s|\\\\*|\\|g;s|/|\\|g;s|\\|\\\\|g' + +# Re-`\' parameter expansions in output of double_quote_subst that were +# `\'-ed in input to the same. If an odd number of `\' preceded a '$' +# in input to double_quote_subst, that '$' was protected from expansion. +# Since each input `\' is now two `\'s, look for any number of runs of +# four `\'s followed by two `\'s and then a '$'. `\' that '$'. 
+bs='\\' +bs2='\\\\' +bs4='\\\\\\\\' +dollar='\$' +sed_double_backslash="\ + s/$bs4/&\\ +/g + s/^$bs2$dollar/$bs&/ + s/\\([^$bs]\\)$bs2$dollar/\\1$bs2$bs$dollar/g + s/\n//g" + +# Standard options: +opt_dry_run=false +opt_help=false +opt_quiet=false +opt_verbose=false +opt_warning=: + +# func_echo arg... +# Echo program name prefixed message, along with the current mode +# name if it has been set yet. +func_echo () +{ + $ECHO "$progname: ${opt_mode+$opt_mode: }$*" +} + +# func_verbose arg... +# Echo program name prefixed message in verbose mode only. +func_verbose () +{ + $opt_verbose && func_echo ${1+"$@"} + + # A bug in bash halts the script if the last line of a function + # fails when set -e is in force, so we need another command to + # work around that: + : +} + +# func_echo_all arg... +# Invoke $ECHO with all args, space-separated. +func_echo_all () +{ + $ECHO "$*" +} + +# func_error arg... +# Echo program name prefixed message to standard error. +func_error () +{ + $ECHO "$progname: ${opt_mode+$opt_mode: }"${1+"$@"} 1>&2 +} + +# func_warning arg... +# Echo program name prefixed warning message to standard error. +func_warning () +{ + $opt_warning && $ECHO "$progname: ${opt_mode+$opt_mode: }warning: "${1+"$@"} 1>&2 + + # bash bug again: + : +} + +# func_fatal_error arg... +# Echo program name prefixed message to standard error, and exit. +func_fatal_error () +{ + func_error ${1+"$@"} + exit $EXIT_FAILURE +} + +# func_fatal_help arg... +# Echo program name prefixed message to standard error, followed by +# a help hint, and exit. +func_fatal_help () +{ + func_error ${1+"$@"} + func_fatal_error "$help" +} +help="Try \`$progname --help' for more information." ## default + + +# func_grep expression filename +# Check whether EXPRESSION matches any line of FILENAME, without output. +func_grep () +{ + $GREP "$1" "$2" >/dev/null 2>&1 +} + + +# func_mkdir_p directory-path +# Make sure the entire path to DIRECTORY-PATH is available. 
+func_mkdir_p () +{ + my_directory_path="$1" + my_dir_list= + + if test -n "$my_directory_path" && test "$opt_dry_run" != ":"; then + + # Protect directory names starting with `-' + case $my_directory_path in + -*) my_directory_path="./$my_directory_path" ;; + esac + + # While some portion of DIR does not yet exist... + while test ! -d "$my_directory_path"; do + # ...make a list in topmost first order. Use a colon delimited + # list incase some portion of path contains whitespace. + my_dir_list="$my_directory_path:$my_dir_list" + + # If the last portion added has no slash in it, the list is done + case $my_directory_path in */*) ;; *) break ;; esac + + # ...otherwise throw away the child directory and loop + my_directory_path=`$ECHO "$my_directory_path" | $SED -e "$dirname"` + done + my_dir_list=`$ECHO "$my_dir_list" | $SED 's,:*$,,'` + + save_mkdir_p_IFS="$IFS"; IFS=':' + for my_dir in $my_dir_list; do + IFS="$save_mkdir_p_IFS" + # mkdir can fail with a `File exist' error if two processes + # try to create one of the directories concurrently. Don't + # stop in that case! + $MKDIR "$my_dir" 2>/dev/null || : + done + IFS="$save_mkdir_p_IFS" + + # Bail out if we (or some other process) failed to create a directory. + test -d "$my_directory_path" || \ + func_fatal_error "Failed to create \`$1'" + fi +} + + +# func_mktempdir [string] +# Make a temporary directory that won't clash with other running +# libtool processes, and avoids race conditions if possible. If +# given, STRING is the basename for that directory. +func_mktempdir () +{ + my_template="${TMPDIR-/tmp}/${1-$progname}" + + if test "$opt_dry_run" = ":"; then + # Return a directory name, but don't create it in dry-run mode + my_tmpdir="${my_template}-$$" + else + + # If mktemp works, use that first and foremost + my_tmpdir=`mktemp -d "${my_template}-XXXXXXXX" 2>/dev/null` + + if test ! 
-d "$my_tmpdir"; then + # Failing that, at least try and use $RANDOM to avoid a race + my_tmpdir="${my_template}-${RANDOM-0}$$" + + save_mktempdir_umask=`umask` + umask 0077 + $MKDIR "$my_tmpdir" + umask $save_mktempdir_umask + fi + + # If we're not in dry-run mode, bomb out on failure + test -d "$my_tmpdir" || \ + func_fatal_error "cannot create temporary directory \`$my_tmpdir'" + fi + + $ECHO "$my_tmpdir" +} + + +# func_quote_for_eval arg +# Aesthetically quote ARG to be evaled later. +# This function returns two values: FUNC_QUOTE_FOR_EVAL_RESULT +# is double-quoted, suitable for a subsequent eval, whereas +# FUNC_QUOTE_FOR_EVAL_UNQUOTED_RESULT has merely all characters +# which are still active within double quotes backslashified. +func_quote_for_eval () +{ + case $1 in + *[\\\`\"\$]*) + func_quote_for_eval_unquoted_result=`$ECHO "$1" | $SED "$sed_quote_subst"` ;; + *) + func_quote_for_eval_unquoted_result="$1" ;; + esac + + case $func_quote_for_eval_unquoted_result in + # Double-quote args containing shell metacharacters to delay + # word splitting, command substitution and and variable + # expansion for a subsequent eval. + # Many Bourne shells cannot handle close brackets correctly + # in scan sets, so we specify it separately. + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + func_quote_for_eval_result="\"$func_quote_for_eval_unquoted_result\"" + ;; + *) + func_quote_for_eval_result="$func_quote_for_eval_unquoted_result" + esac +} + + +# func_quote_for_expand arg +# Aesthetically quote ARG to be evaled later; same as above, +# but do not quote variable references. +func_quote_for_expand () +{ + case $1 in + *[\\\`\"]*) + my_arg=`$ECHO "$1" | $SED \ + -e "$double_quote_subst" -e "$sed_double_backslash"` ;; + *) + my_arg="$1" ;; + esac + + case $my_arg in + # Double-quote args containing shell metacharacters to delay + # word splitting and command substitution for a subsequent eval. 
+ # Many Bourne shells cannot handle close brackets correctly + # in scan sets, so we specify it separately. + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + my_arg="\"$my_arg\"" + ;; + esac + + func_quote_for_expand_result="$my_arg" +} + + +# func_show_eval cmd [fail_exp] +# Unless opt_silent is true, then output CMD. Then, if opt_dryrun is +# not true, evaluate CMD. If the evaluation of CMD fails, and FAIL_EXP +# is given, then evaluate it. +func_show_eval () +{ + my_cmd="$1" + my_fail_exp="${2-:}" + + ${opt_silent-false} || { + func_quote_for_expand "$my_cmd" + eval "func_echo $func_quote_for_expand_result" + } + + if ${opt_dry_run-false}; then :; else + eval "$my_cmd" + my_status=$? + if test "$my_status" -eq 0; then :; else + eval "(exit $my_status); $my_fail_exp" + fi + fi +} + + +# func_show_eval_locale cmd [fail_exp] +# Unless opt_silent is true, then output CMD. Then, if opt_dryrun is +# not true, evaluate CMD. If the evaluation of CMD fails, and FAIL_EXP +# is given, then evaluate it. Use the saved locale for evaluation. +func_show_eval_locale () +{ + my_cmd="$1" + my_fail_exp="${2-:}" + + ${opt_silent-false} || { + func_quote_for_expand "$my_cmd" + eval "func_echo $func_quote_for_expand_result" + } + + if ${opt_dry_run-false}; then :; else + eval "$lt_user_locale + $my_cmd" + my_status=$? + eval "$lt_safe_locale" + if test "$my_status" -eq 0; then :; else + eval "(exit $my_status); $my_fail_exp" + fi + fi +} + +# func_tr_sh +# Turn $1 into a string suitable for a shell variable name. +# Result is stored in $func_tr_sh_result. All characters +# not in the set a-zA-Z0-9_ are replaced with '_'. Further, +# if $1 begins with a digit, a '_' is prepended as well. +func_tr_sh () +{ + case $1 in + [0-9]* | *[!a-zA-Z0-9_]*) + func_tr_sh_result=`$ECHO "$1" | $SED 's/^\([0-9]\)/_\1/; s/[^a-zA-Z0-9_]/_/g'` + ;; + * ) + func_tr_sh_result=$1 + ;; + esac +} + + +# func_version +# Echo version message to standard output and exit. 
+func_version () +{ + $opt_debug + + $SED -n '/(C)/!b go + :more + /\./!{ + N + s/\n# / / + b more + } + :go + /^# '$PROGRAM' (GNU /,/# warranty; / { + s/^# // + s/^# *$// + s/\((C)\)[ 0-9,-]*\( [1-9][0-9]*\)/\1\2/ + p + }' < "$progpath" + exit $? +} + +# func_usage +# Echo short help message to standard output and exit. +func_usage () +{ + $opt_debug + + $SED -n '/^# Usage:/,/^# *.*--help/ { + s/^# // + s/^# *$// + s/\$progname/'$progname'/ + p + }' < "$progpath" + echo + $ECHO "run \`$progname --help | more' for full usage" + exit $? +} + +# func_help [NOEXIT] +# Echo long help message to standard output and exit, +# unless 'noexit' is passed as argument. +func_help () +{ + $opt_debug + + $SED -n '/^# Usage:/,/# Report bugs to/ { + :print + s/^# // + s/^# *$// + s*\$progname*'$progname'* + s*\$host*'"$host"'* + s*\$SHELL*'"$SHELL"'* + s*\$LTCC*'"$LTCC"'* + s*\$LTCFLAGS*'"$LTCFLAGS"'* + s*\$LD*'"$LD"'* + s/\$with_gnu_ld/'"$with_gnu_ld"'/ + s/\$automake_version/'"`(${AUTOMAKE-automake} --version) 2>/dev/null |$SED 1q`"'/ + s/\$autoconf_version/'"`(${AUTOCONF-autoconf} --version) 2>/dev/null |$SED 1q`"'/ + p + d + } + /^# .* home page:/b print + /^# General help using/b print + ' < "$progpath" + ret=$? + if test -z "$1"; then + exit $ret + fi +} + +# func_missing_arg argname +# Echo program name prefixed message to standard error and set global +# exit_cmd. +func_missing_arg () +{ + $opt_debug + + func_error "missing argument for $1." + exit_cmd=exit +} + + +# func_split_short_opt shortopt +# Set func_split_short_opt_name and func_split_short_opt_arg shell +# variables after splitting SHORTOPT after the 2nd character. 
+func_split_short_opt () +{ + my_sed_short_opt='1s/^\(..\).*$/\1/;q' + my_sed_short_rest='1s/^..\(.*\)$/\1/;q' + + func_split_short_opt_name=`$ECHO "$1" | $SED "$my_sed_short_opt"` + func_split_short_opt_arg=`$ECHO "$1" | $SED "$my_sed_short_rest"` +} # func_split_short_opt may be replaced by extended shell implementation + + +# func_split_long_opt longopt +# Set func_split_long_opt_name and func_split_long_opt_arg shell +# variables after splitting LONGOPT at the `=' sign. +func_split_long_opt () +{ + my_sed_long_opt='1s/^\(--[^=]*\)=.*/\1/;q' + my_sed_long_arg='1s/^--[^=]*=//' + + func_split_long_opt_name=`$ECHO "$1" | $SED "$my_sed_long_opt"` + func_split_long_opt_arg=`$ECHO "$1" | $SED "$my_sed_long_arg"` +} # func_split_long_opt may be replaced by extended shell implementation + +exit_cmd=: + + + + + +magic="%%%MAGIC variable%%%" +magic_exe="%%%MAGIC EXE variable%%%" + +# Global variables. +nonopt= +preserve_args= +lo2o="s/\\.lo\$/.${objext}/" +o2lo="s/\\.${objext}\$/.lo/" +extracted_archives= +extracted_serial=0 + +# If this variable is set in any of the actions, the command in it +# will be execed at the end. This prevents here-documents from being +# left over by shells. +exec_cmd= + +# func_append var value +# Append VALUE to the end of shell variable VAR. +func_append () +{ + eval "${1}=\$${1}\${2}" +} # func_append may be replaced by extended shell implementation + +# func_append_quoted var value +# Quote VALUE and append to the end of shell variable VAR, separated +# by a space. +func_append_quoted () +{ + func_quote_for_eval "${2}" + eval "${1}=\$${1}\\ \$func_quote_for_eval_result" +} # func_append_quoted may be replaced by extended shell implementation + + +# func_arith arithmetic-term... +func_arith () +{ + func_arith_result=`expr "${@}"` +} # func_arith may be replaced by extended shell implementation + + +# func_len string +# STRING may not start with a hyphen. 
+func_len () +{ + func_len_result=`expr "${1}" : ".*" 2>/dev/null || echo $max_cmd_len` +} # func_len may be replaced by extended shell implementation + + +# func_lo2o object +func_lo2o () +{ + func_lo2o_result=`$ECHO "${1}" | $SED "$lo2o"` +} # func_lo2o may be replaced by extended shell implementation + + +# func_xform libobj-or-source +func_xform () +{ + func_xform_result=`$ECHO "${1}" | $SED 's/\.[^.]*$/.lo/'` +} # func_xform may be replaced by extended shell implementation + + +# func_fatal_configuration arg... +# Echo program name prefixed message to standard error, followed by +# a configuration failure hint, and exit. +func_fatal_configuration () +{ + func_error ${1+"$@"} + func_error "See the $PACKAGE documentation for more information." + func_fatal_error "Fatal configuration error." +} + + +# func_config +# Display the configuration for all the tags in this script. +func_config () +{ + re_begincf='^# ### BEGIN LIBTOOL' + re_endcf='^# ### END LIBTOOL' + + # Default configuration. + $SED "1,/$re_begincf CONFIG/d;/$re_endcf CONFIG/,\$d" < "$progpath" + + # Now print the configurations for the tags. + for tagname in $taglist; do + $SED -n "/$re_begincf TAG CONFIG: $tagname\$/,/$re_endcf TAG CONFIG: $tagname\$/p" < "$progpath" + done + + exit $? +} + +# func_features +# Display the features supported by this script. +func_features () +{ + echo "host: $host" + if test "$build_libtool_libs" = yes; then + echo "enable shared libraries" + else + echo "disable shared libraries" + fi + if test "$build_old_libs" = yes; then + echo "enable static libraries" + else + echo "disable static libraries" + fi + + exit $? +} + +# func_enable_tag tagname +# Verify that TAGNAME is valid, and either flag an error and exit, or +# enable the TAGNAME tag. We also add TAGNAME to the global $taglist +# variable here. 
+func_enable_tag () +{ + # Global variable: + tagname="$1" + + re_begincf="^# ### BEGIN LIBTOOL TAG CONFIG: $tagname\$" + re_endcf="^# ### END LIBTOOL TAG CONFIG: $tagname\$" + sed_extractcf="/$re_begincf/,/$re_endcf/p" + + # Validate tagname. + case $tagname in + *[!-_A-Za-z0-9,/]*) + func_fatal_error "invalid tag name: $tagname" + ;; + esac + + # Don't test for the "default" C tag, as we know it's + # there but not specially marked. + case $tagname in + CC) ;; + *) + if $GREP "$re_begincf" "$progpath" >/dev/null 2>&1; then + taglist="$taglist $tagname" + + # Evaluate the configuration. Be careful to quote the path + # and the sed script, to avoid splitting on whitespace, but + # also don't use non-portable quotes within backquotes within + # quotes we have to do it in 2 steps: + extractedcf=`$SED -n -e "$sed_extractcf" < "$progpath"` + eval "$extractedcf" + else + func_error "ignoring unknown tag $tagname" + fi + ;; + esac +} + +# func_check_version_match +# Ensure that we are using m4 macros, and libtool script from the same +# release of libtool. +func_check_version_match () +{ + if test "$package_revision" != "$macro_revision"; then + if test "$VERSION" != "$macro_version"; then + if test -z "$macro_version"; then + cat >&2 <<_LT_EOF +$progname: Version mismatch error. This is $PACKAGE $VERSION, but the +$progname: definition of this LT_INIT comes from an older release. +$progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION +$progname: and run autoconf again. +_LT_EOF + else + cat >&2 <<_LT_EOF +$progname: Version mismatch error. This is $PACKAGE $VERSION, but the +$progname: definition of this LT_INIT comes from $PACKAGE $macro_version. +$progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION +$progname: and run autoconf again. +_LT_EOF + fi + else + cat >&2 <<_LT_EOF +$progname: Version mismatch error. 
This is $PACKAGE $VERSION, revision $package_revision, +$progname: but the definition of this LT_INIT comes from revision $macro_revision. +$progname: You should recreate aclocal.m4 with macros from revision $package_revision +$progname: of $PACKAGE $VERSION and run autoconf again. +_LT_EOF + fi + + exit $EXIT_MISMATCH + fi +} + + +# Shorthand for --mode=foo, only valid as the first argument +case $1 in +clean|clea|cle|cl) + shift; set dummy --mode clean ${1+"$@"}; shift + ;; +compile|compil|compi|comp|com|co|c) + shift; set dummy --mode compile ${1+"$@"}; shift + ;; +execute|execut|execu|exec|exe|ex|e) + shift; set dummy --mode execute ${1+"$@"}; shift + ;; +finish|finis|fini|fin|fi|f) + shift; set dummy --mode finish ${1+"$@"}; shift + ;; +install|instal|insta|inst|ins|in|i) + shift; set dummy --mode install ${1+"$@"}; shift + ;; +link|lin|li|l) + shift; set dummy --mode link ${1+"$@"}; shift + ;; +uninstall|uninstal|uninsta|uninst|unins|unin|uni|un|u) + shift; set dummy --mode uninstall ${1+"$@"}; shift + ;; +esac + + + +# Option defaults: +opt_debug=: +opt_dry_run=false +opt_config=false +opt_preserve_dup_deps=false +opt_features=false +opt_finish=false +opt_help=false +opt_help_all=false +opt_silent=: +opt_warning=: +opt_verbose=: +opt_silent=false +opt_verbose=false + + +# Parse options once, thoroughly. This comes as soon as possible in the +# script to make things like `--version' happen as quickly as we can. 
+{ + # this just eases exit handling + while test $# -gt 0; do + opt="$1" + shift + case $opt in + --debug|-x) opt_debug='set -x' + func_echo "enabling shell trace mode" + $opt_debug + ;; + --dry-run|--dryrun|-n) + opt_dry_run=: + ;; + --config) + opt_config=: +func_config + ;; + --dlopen|-dlopen) + optarg="$1" + opt_dlopen="${opt_dlopen+$opt_dlopen +}$optarg" + shift + ;; + --preserve-dup-deps) + opt_preserve_dup_deps=: + ;; + --features) + opt_features=: +func_features + ;; + --finish) + opt_finish=: +set dummy --mode finish ${1+"$@"}; shift + ;; + --help) + opt_help=: + ;; + --help-all) + opt_help_all=: +opt_help=': help-all' + ;; + --mode) + test $# = 0 && func_missing_arg $opt && break + optarg="$1" + opt_mode="$optarg" +case $optarg in + # Valid mode arguments: + clean|compile|execute|finish|install|link|relink|uninstall) ;; + + # Catch anything else as an error + *) func_error "invalid argument for $opt" + exit_cmd=exit + break + ;; +esac + shift + ;; + --no-silent|--no-quiet) + opt_silent=false +func_append preserve_args " $opt" + ;; + --no-warning|--no-warn) + opt_warning=false +func_append preserve_args " $opt" + ;; + --no-verbose) + opt_verbose=false +func_append preserve_args " $opt" + ;; + --silent|--quiet) + opt_silent=: +func_append preserve_args " $opt" + opt_verbose=false + ;; + --verbose|-v) + opt_verbose=: +func_append preserve_args " $opt" +opt_silent=false + ;; + --tag) + test $# = 0 && func_missing_arg $opt && break + optarg="$1" + opt_tag="$optarg" +func_append preserve_args " $opt $optarg" +func_enable_tag "$optarg" + shift + ;; + + -\?|-h) func_usage ;; + --help) func_help ;; + --version) func_version ;; + + # Separate optargs to long options: + --*=*) + func_split_long_opt "$opt" + set dummy "$func_split_long_opt_name" "$func_split_long_opt_arg" ${1+"$@"} + shift + ;; + + # Separate non-argument short options: + -\?*|-h*|-n*|-v*) + func_split_short_opt "$opt" + set dummy "$func_split_short_opt_name" "-$func_split_short_opt_arg" ${1+"$@"} + 
shift + ;; + + --) break ;; + -*) func_fatal_help "unrecognized option \`$opt'" ;; + *) set dummy "$opt" ${1+"$@"}; shift; break ;; + esac + done + + # Validate options: + + # save first non-option argument + if test "$#" -gt 0; then + nonopt="$opt" + shift + fi + + # preserve --debug + test "$opt_debug" = : || func_append preserve_args " --debug" + + case $host in + *cygwin* | *mingw* | *pw32* | *cegcc*) + # don't eliminate duplications in $postdeps and $predeps + opt_duplicate_compiler_generated_deps=: + ;; + *) + opt_duplicate_compiler_generated_deps=$opt_preserve_dup_deps + ;; + esac + + $opt_help || { + # Sanity checks first: + func_check_version_match + + if test "$build_libtool_libs" != yes && test "$build_old_libs" != yes; then + func_fatal_configuration "not configured to build any kind of library" + fi + + # Darwin sucks + eval std_shrext=\"$shrext_cmds\" + + # Only execute mode is allowed to have -dlopen flags. + if test -n "$opt_dlopen" && test "$opt_mode" != execute; then + func_error "unrecognized option \`-dlopen'" + $ECHO "$help" 1>&2 + exit $EXIT_FAILURE + fi + + # Change the help message to a mode-specific one. + generic_help="$help" + help="Try \`$progname --help --mode=$opt_mode' for more information." + } + + + # Bail if the options were screwed + $exit_cmd $EXIT_FAILURE +} + + + + +## ----------- ## +## Main. ## +## ----------- ## + +# func_lalib_p file +# True iff FILE is a libtool `.la' library or `.lo' object file. +# This function is only a basic sanity check; it will hardly flush out +# determined imposters. +func_lalib_p () +{ + test -f "$1" && + $SED -e 4q "$1" 2>/dev/null \ + | $GREP "^# Generated by .*$PACKAGE" > /dev/null 2>&1 +} + +# func_lalib_unsafe_p file +# True iff FILE is a libtool `.la' library or `.lo' object file. +# This function implements the same check as func_lalib_p without +# resorting to external programs. To this end, it redirects stdin and +# closes it afterwards, without saving the original file descriptor. 
+# As a safety measure, use it only where a negative result would be +# fatal anyway. Works if `file' does not exist. +func_lalib_unsafe_p () +{ + lalib_p=no + if test -f "$1" && test -r "$1" && exec 5<&0 <"$1"; then + for lalib_p_l in 1 2 3 4 + do + read lalib_p_line + case "$lalib_p_line" in + \#\ Generated\ by\ *$PACKAGE* ) lalib_p=yes; break;; + esac + done + exec 0<&5 5<&- + fi + test "$lalib_p" = yes +} + +# func_ltwrapper_script_p file +# True iff FILE is a libtool wrapper script +# This function is only a basic sanity check; it will hardly flush out +# determined imposters. +func_ltwrapper_script_p () +{ + func_lalib_p "$1" +} + +# func_ltwrapper_executable_p file +# True iff FILE is a libtool wrapper executable +# This function is only a basic sanity check; it will hardly flush out +# determined imposters. +func_ltwrapper_executable_p () +{ + func_ltwrapper_exec_suffix= + case $1 in + *.exe) ;; + *) func_ltwrapper_exec_suffix=.exe ;; + esac + $GREP "$magic_exe" "$1$func_ltwrapper_exec_suffix" >/dev/null 2>&1 +} + +# func_ltwrapper_scriptname file +# Assumes file is an ltwrapper_executable +# uses $file to determine the appropriate filename for a +# temporary ltwrapper_script. +func_ltwrapper_scriptname () +{ + func_dirname_and_basename "$1" "" "." + func_stripname '' '.exe' "$func_basename_result" + func_ltwrapper_scriptname_result="$func_dirname_result/$objdir/${func_stripname_result}_ltshwrapper" +} + +# func_ltwrapper_p file +# True iff FILE is a libtool wrapper script or wrapper executable +# This function is only a basic sanity check; it will hardly flush out +# determined imposters. +func_ltwrapper_p () +{ + func_ltwrapper_script_p "$1" || func_ltwrapper_executable_p "$1" +} + + +# func_execute_cmds commands fail_cmd +# Execute tilde-delimited COMMANDS. +# If FAIL_CMD is given, eval that upon failure. +# FAIL_CMD may read-access the current command in variable CMD! 
+func_execute_cmds () +{ + $opt_debug + save_ifs=$IFS; IFS='~' + for cmd in $1; do + IFS=$save_ifs + eval cmd=\"$cmd\" + func_show_eval "$cmd" "${2-:}" + done + IFS=$save_ifs +} + + +# func_source file +# Source FILE, adding directory component if necessary. +# Note that it is not necessary on cygwin/mingw to append a dot to +# FILE even if both FILE and FILE.exe exist: automatic-append-.exe +# behavior happens only for exec(3), not for open(2)! Also, sourcing +# `FILE.' does not work on cygwin managed mounts. +func_source () +{ + $opt_debug + case $1 in + */* | *\\*) . "$1" ;; + *) . "./$1" ;; + esac +} + + +# func_resolve_sysroot PATH +# Replace a leading = in PATH with a sysroot. Store the result into +# func_resolve_sysroot_result +func_resolve_sysroot () +{ + func_resolve_sysroot_result=$1 + case $func_resolve_sysroot_result in + =*) + func_stripname '=' '' "$func_resolve_sysroot_result" + func_resolve_sysroot_result=$lt_sysroot$func_stripname_result + ;; + esac +} + +# func_replace_sysroot PATH +# If PATH begins with the sysroot, replace it with = and +# store the result into func_replace_sysroot_result. +func_replace_sysroot () +{ + case "$lt_sysroot:$1" in + ?*:"$lt_sysroot"*) + func_stripname "$lt_sysroot" '' "$1" + func_replace_sysroot_result="=$func_stripname_result" + ;; + *) + # Including no sysroot. + func_replace_sysroot_result=$1 + ;; + esac +} + +# func_infer_tag arg +# Infer tagged configuration to use if any are available and +# if one wasn't chosen via the "--tag" command line option. +# Only attempt this if the compiler in the base compile +# command doesn't match the default compiler. +# arg is usually of the form 'gcc ...' 
+func_infer_tag () +{ + $opt_debug + if test -n "$available_tags" && test -z "$tagname"; then + CC_quoted= + for arg in $CC; do + func_append_quoted CC_quoted "$arg" + done + CC_expanded=`func_echo_all $CC` + CC_quoted_expanded=`func_echo_all $CC_quoted` + case $@ in + # Blanks in the command may have been stripped by the calling shell, + # but not from the CC environment variable when configure was run. + " $CC "* | "$CC "* | " $CC_expanded "* | "$CC_expanded "* | \ + " $CC_quoted"* | "$CC_quoted "* | " $CC_quoted_expanded "* | "$CC_quoted_expanded "*) ;; + # Blanks at the start of $base_compile will cause this to fail + # if we don't check for them as well. + *) + for z in $available_tags; do + if $GREP "^# ### BEGIN LIBTOOL TAG CONFIG: $z$" < "$progpath" > /dev/null; then + # Evaluate the configuration. + eval "`${SED} -n -e '/^# ### BEGIN LIBTOOL TAG CONFIG: '$z'$/,/^# ### END LIBTOOL TAG CONFIG: '$z'$/p' < $progpath`" + CC_quoted= + for arg in $CC; do + # Double-quote args containing other shell metacharacters. + func_append_quoted CC_quoted "$arg" + done + CC_expanded=`func_echo_all $CC` + CC_quoted_expanded=`func_echo_all $CC_quoted` + case "$@ " in + " $CC "* | "$CC "* | " $CC_expanded "* | "$CC_expanded "* | \ + " $CC_quoted"* | "$CC_quoted "* | " $CC_quoted_expanded "* | "$CC_quoted_expanded "*) + # The compiler in the base compile command matches + # the one in the tagged configuration. + # Assume this is the tagged configuration we want. + tagname=$z + break + ;; + esac + fi + done + # If $tagname still isn't set, then no tagged configuration + # was found and let the user know that the "--tag" command + # line option must be used. 
+ if test -z "$tagname"; then + func_echo "unable to infer tagged configuration" + func_fatal_error "specify a tag with \`--tag'" +# else +# func_verbose "using $tagname tagged configuration" + fi + ;; + esac + fi +} + + + +# func_write_libtool_object output_name pic_name nonpic_name +# Create a libtool object file (analogous to a ".la" file), +# but don't create it if we're doing a dry run. +func_write_libtool_object () +{ + write_libobj=${1} + if test "$build_libtool_libs" = yes; then + write_lobj=\'${2}\' + else + write_lobj=none + fi + + if test "$build_old_libs" = yes; then + write_oldobj=\'${3}\' + else + write_oldobj=none + fi + + $opt_dry_run || { + cat >${write_libobj}T </dev/null` + if test "$?" -eq 0 && test -n "${func_convert_core_file_wine_to_w32_tmp}"; then + func_convert_core_file_wine_to_w32_result=`$ECHO "$func_convert_core_file_wine_to_w32_tmp" | + $SED -e "$lt_sed_naive_backslashify"` + else + func_convert_core_file_wine_to_w32_result= + fi + fi +} +# end: func_convert_core_file_wine_to_w32 + + +# func_convert_core_path_wine_to_w32 ARG +# Helper function used by path conversion functions when $build is *nix, and +# $host is mingw, cygwin, or some other w32 environment. Relies on a correctly +# configured wine environment available, with the winepath program in $build's +# $PATH. Assumes ARG has no leading or trailing path separator characters. +# +# ARG is path to be converted from $build format to win32. +# Result is available in $func_convert_core_path_wine_to_w32_result. +# Unconvertible file (directory) names in ARG are skipped; if no directory names +# are convertible, then the result may be empty. 
+func_convert_core_path_wine_to_w32 () +{ + $opt_debug + # unfortunately, winepath doesn't convert paths, only file names + func_convert_core_path_wine_to_w32_result="" + if test -n "$1"; then + oldIFS=$IFS + IFS=: + for func_convert_core_path_wine_to_w32_f in $1; do + IFS=$oldIFS + func_convert_core_file_wine_to_w32 "$func_convert_core_path_wine_to_w32_f" + if test -n "$func_convert_core_file_wine_to_w32_result" ; then + if test -z "$func_convert_core_path_wine_to_w32_result"; then + func_convert_core_path_wine_to_w32_result="$func_convert_core_file_wine_to_w32_result" + else + func_append func_convert_core_path_wine_to_w32_result ";$func_convert_core_file_wine_to_w32_result" + fi + fi + done + IFS=$oldIFS + fi +} +# end: func_convert_core_path_wine_to_w32 + + +# func_cygpath ARGS... +# Wrapper around calling the cygpath program via LT_CYGPATH. This is used when +# when (1) $build is *nix and Cygwin is hosted via a wine environment; or (2) +# $build is MSYS and $host is Cygwin, or (3) $build is Cygwin. In case (1) or +# (2), returns the Cygwin file name or path in func_cygpath_result (input +# file name or path is assumed to be in w32 format, as previously converted +# from $build's *nix or MSYS format). In case (3), returns the w32 file name +# or path in func_cygpath_result (input file name or path is assumed to be in +# Cygwin format). Returns an empty string on error. +# +# ARGS are passed to cygpath, with the last one being the file name or path to +# be converted. +# +# Specify the absolute *nix (or w32) name to cygpath in the LT_CYGPATH +# environment variable; do not put it in $PATH. +func_cygpath () +{ + $opt_debug + if test -n "$LT_CYGPATH" && test -f "$LT_CYGPATH"; then + func_cygpath_result=`$LT_CYGPATH "$@" 2>/dev/null` + if test "$?" 
-ne 0; then + # on failure, ensure result is empty + func_cygpath_result= + fi + else + func_cygpath_result= + func_error "LT_CYGPATH is empty or specifies non-existent file: \`$LT_CYGPATH'" + fi +} +#end: func_cygpath + + +# func_convert_core_msys_to_w32 ARG +# Convert file name or path ARG from MSYS format to w32 format. Return +# result in func_convert_core_msys_to_w32_result. +func_convert_core_msys_to_w32 () +{ + $opt_debug + # awkward: cmd appends spaces to result + func_convert_core_msys_to_w32_result=`( cmd //c echo "$1" ) 2>/dev/null | + $SED -e 's/[ ]*$//' -e "$lt_sed_naive_backslashify"` +} +#end: func_convert_core_msys_to_w32 + + +# func_convert_file_check ARG1 ARG2 +# Verify that ARG1 (a file name in $build format) was converted to $host +# format in ARG2. Otherwise, emit an error message, but continue (resetting +# func_to_host_file_result to ARG1). +func_convert_file_check () +{ + $opt_debug + if test -z "$2" && test -n "$1" ; then + func_error "Could not determine host file name corresponding to" + func_error " \`$1'" + func_error "Continuing, but uninstalled executables may not work." + # Fallback: + func_to_host_file_result="$1" + fi +} +# end func_convert_file_check + + +# func_convert_path_check FROM_PATHSEP TO_PATHSEP FROM_PATH TO_PATH +# Verify that FROM_PATH (a path in $build format) was converted to $host +# format in TO_PATH. Otherwise, emit an error message, but continue, resetting +# func_to_host_file_result to a simplistic fallback value (see below). +func_convert_path_check () +{ + $opt_debug + if test -z "$4" && test -n "$3"; then + func_error "Could not determine the host path corresponding to" + func_error " \`$3'" + func_error "Continuing, but uninstalled executables may not work." + # Fallback. This is a deliberately simplistic "conversion" and + # should not be "improved". See libtool.info. 
+ if test "x$1" != "x$2"; then + lt_replace_pathsep_chars="s|$1|$2|g" + func_to_host_path_result=`echo "$3" | + $SED -e "$lt_replace_pathsep_chars"` + else + func_to_host_path_result="$3" + fi + fi +} +# end func_convert_path_check + + +# func_convert_path_front_back_pathsep FRONTPAT BACKPAT REPL ORIG +# Modifies func_to_host_path_result by prepending REPL if ORIG matches FRONTPAT +# and appending REPL if ORIG matches BACKPAT. +func_convert_path_front_back_pathsep () +{ + $opt_debug + case $4 in + $1 ) func_to_host_path_result="$3$func_to_host_path_result" + ;; + esac + case $4 in + $2 ) func_append func_to_host_path_result "$3" + ;; + esac +} +# end func_convert_path_front_back_pathsep + + +################################################## +# $build to $host FILE NAME CONVERSION FUNCTIONS # +################################################## +# invoked via `$to_host_file_cmd ARG' +# +# In each case, ARG is the path to be converted from $build to $host format. +# Result will be available in $func_to_host_file_result. + + +# func_to_host_file ARG +# Converts the file name ARG from $build format to $host format. Return result +# in func_to_host_file_result. +func_to_host_file () +{ + $opt_debug + $to_host_file_cmd "$1" +} +# end func_to_host_file + + +# func_to_tool_file ARG LAZY +# converts the file name ARG from $build format to toolchain format. Return +# result in func_to_tool_file_result. If the conversion in use is listed +# in (the comma separated) LAZY, no conversion takes place. +func_to_tool_file () +{ + $opt_debug + case ,$2, in + *,"$to_tool_file_cmd",*) + func_to_tool_file_result=$1 + ;; + *) + $to_tool_file_cmd "$1" + func_to_tool_file_result=$func_to_host_file_result + ;; + esac +} +# end func_to_tool_file + + +# func_convert_file_noop ARG +# Copy ARG to func_to_host_file_result. 
+func_convert_file_noop () +{ + func_to_host_file_result="$1" +} +# end func_convert_file_noop + + +# func_convert_file_msys_to_w32 ARG +# Convert file name ARG from (mingw) MSYS to (mingw) w32 format; automatic +# conversion to w32 is not available inside the cwrapper. Returns result in +# func_to_host_file_result. +func_convert_file_msys_to_w32 () +{ + $opt_debug + func_to_host_file_result="$1" + if test -n "$1"; then + func_convert_core_msys_to_w32 "$1" + func_to_host_file_result="$func_convert_core_msys_to_w32_result" + fi + func_convert_file_check "$1" "$func_to_host_file_result" +} +# end func_convert_file_msys_to_w32 + + +# func_convert_file_cygwin_to_w32 ARG +# Convert file name ARG from Cygwin to w32 format. Returns result in +# func_to_host_file_result. +func_convert_file_cygwin_to_w32 () +{ + $opt_debug + func_to_host_file_result="$1" + if test -n "$1"; then + # because $build is cygwin, we call "the" cygpath in $PATH; no need to use + # LT_CYGPATH in this case. + func_to_host_file_result=`cygpath -m "$1"` + fi + func_convert_file_check "$1" "$func_to_host_file_result" +} +# end func_convert_file_cygwin_to_w32 + + +# func_convert_file_nix_to_w32 ARG +# Convert file name ARG from *nix to w32 format. Requires a wine environment +# and a working winepath. Returns result in func_to_host_file_result. +func_convert_file_nix_to_w32 () +{ + $opt_debug + func_to_host_file_result="$1" + if test -n "$1"; then + func_convert_core_file_wine_to_w32 "$1" + func_to_host_file_result="$func_convert_core_file_wine_to_w32_result" + fi + func_convert_file_check "$1" "$func_to_host_file_result" +} +# end func_convert_file_nix_to_w32 + + +# func_convert_file_msys_to_cygwin ARG +# Convert file name ARG from MSYS to Cygwin format. Requires LT_CYGPATH set. +# Returns result in func_to_host_file_result. 
+func_convert_file_msys_to_cygwin () +{ + $opt_debug + func_to_host_file_result="$1" + if test -n "$1"; then + func_convert_core_msys_to_w32 "$1" + func_cygpath -u "$func_convert_core_msys_to_w32_result" + func_to_host_file_result="$func_cygpath_result" + fi + func_convert_file_check "$1" "$func_to_host_file_result" +} +# end func_convert_file_msys_to_cygwin + + +# func_convert_file_nix_to_cygwin ARG +# Convert file name ARG from *nix to Cygwin format. Requires Cygwin installed +# in a wine environment, working winepath, and LT_CYGPATH set. Returns result +# in func_to_host_file_result. +func_convert_file_nix_to_cygwin () +{ + $opt_debug + func_to_host_file_result="$1" + if test -n "$1"; then + # convert from *nix to w32, then use cygpath to convert from w32 to cygwin. + func_convert_core_file_wine_to_w32 "$1" + func_cygpath -u "$func_convert_core_file_wine_to_w32_result" + func_to_host_file_result="$func_cygpath_result" + fi + func_convert_file_check "$1" "$func_to_host_file_result" +} +# end func_convert_file_nix_to_cygwin + + +############################################# +# $build to $host PATH CONVERSION FUNCTIONS # +############################################# +# invoked via `$to_host_path_cmd ARG' +# +# In each case, ARG is the path to be converted from $build to $host format. +# The result will be available in $func_to_host_path_result. +# +# Path separators are also converted from $build format to $host format. If +# ARG begins or ends with a path separator character, it is preserved (but +# converted to $host format) on output. +# +# All path conversion functions are named using the following convention: +# file name conversion function : func_convert_file_X_to_Y () +# path conversion function : func_convert_path_X_to_Y () +# where, for any given $build/$host combination the 'X_to_Y' value is the +# same. 
If conversion functions are added for new $build/$host combinations, +# the two new functions must follow this pattern, or func_init_to_host_path_cmd +# will break. + + +# func_init_to_host_path_cmd +# Ensures that function "pointer" variable $to_host_path_cmd is set to the +# appropriate value, based on the value of $to_host_file_cmd. +to_host_path_cmd= +func_init_to_host_path_cmd () +{ + $opt_debug + if test -z "$to_host_path_cmd"; then + func_stripname 'func_convert_file_' '' "$to_host_file_cmd" + to_host_path_cmd="func_convert_path_${func_stripname_result}" + fi +} + + +# func_to_host_path ARG +# Converts the path ARG from $build format to $host format. Return result +# in func_to_host_path_result. +func_to_host_path () +{ + $opt_debug + func_init_to_host_path_cmd + $to_host_path_cmd "$1" +} +# end func_to_host_path + + +# func_convert_path_noop ARG +# Copy ARG to func_to_host_path_result. +func_convert_path_noop () +{ + func_to_host_path_result="$1" +} +# end func_convert_path_noop + + +# func_convert_path_msys_to_w32 ARG +# Convert path ARG from (mingw) MSYS to (mingw) w32 format; automatic +# conversion to w32 is not available inside the cwrapper. Returns result in +# func_to_host_path_result. +func_convert_path_msys_to_w32 () +{ + $opt_debug + func_to_host_path_result="$1" + if test -n "$1"; then + # Remove leading and trailing path separator characters from ARG. MSYS + # behavior is inconsistent here; cygpath turns them into '.;' and ';.'; + # and winepath ignores them completely. + func_stripname : : "$1" + func_to_host_path_tmp1=$func_stripname_result + func_convert_core_msys_to_w32 "$func_to_host_path_tmp1" + func_to_host_path_result="$func_convert_core_msys_to_w32_result" + func_convert_path_check : ";" \ + "$func_to_host_path_tmp1" "$func_to_host_path_result" + func_convert_path_front_back_pathsep ":*" "*:" ";" "$1" + fi +} +# end func_convert_path_msys_to_w32 + + +# func_convert_path_cygwin_to_w32 ARG +# Convert path ARG from Cygwin to w32 format. 
Returns result in +# func_to_host_file_result. +func_convert_path_cygwin_to_w32 () +{ + $opt_debug + func_to_host_path_result="$1" + if test -n "$1"; then + # See func_convert_path_msys_to_w32: + func_stripname : : "$1" + func_to_host_path_tmp1=$func_stripname_result + func_to_host_path_result=`cygpath -m -p "$func_to_host_path_tmp1"` + func_convert_path_check : ";" \ + "$func_to_host_path_tmp1" "$func_to_host_path_result" + func_convert_path_front_back_pathsep ":*" "*:" ";" "$1" + fi +} +# end func_convert_path_cygwin_to_w32 + + +# func_convert_path_nix_to_w32 ARG +# Convert path ARG from *nix to w32 format. Requires a wine environment and +# a working winepath. Returns result in func_to_host_file_result. +func_convert_path_nix_to_w32 () +{ + $opt_debug + func_to_host_path_result="$1" + if test -n "$1"; then + # See func_convert_path_msys_to_w32: + func_stripname : : "$1" + func_to_host_path_tmp1=$func_stripname_result + func_convert_core_path_wine_to_w32 "$func_to_host_path_tmp1" + func_to_host_path_result="$func_convert_core_path_wine_to_w32_result" + func_convert_path_check : ";" \ + "$func_to_host_path_tmp1" "$func_to_host_path_result" + func_convert_path_front_back_pathsep ":*" "*:" ";" "$1" + fi +} +# end func_convert_path_nix_to_w32 + + +# func_convert_path_msys_to_cygwin ARG +# Convert path ARG from MSYS to Cygwin format. Requires LT_CYGPATH set. +# Returns result in func_to_host_file_result. 
+func_convert_path_msys_to_cygwin () +{ + $opt_debug + func_to_host_path_result="$1" + if test -n "$1"; then + # See func_convert_path_msys_to_w32: + func_stripname : : "$1" + func_to_host_path_tmp1=$func_stripname_result + func_convert_core_msys_to_w32 "$func_to_host_path_tmp1" + func_cygpath -u -p "$func_convert_core_msys_to_w32_result" + func_to_host_path_result="$func_cygpath_result" + func_convert_path_check : : \ + "$func_to_host_path_tmp1" "$func_to_host_path_result" + func_convert_path_front_back_pathsep ":*" "*:" : "$1" + fi +} +# end func_convert_path_msys_to_cygwin + + +# func_convert_path_nix_to_cygwin ARG +# Convert path ARG from *nix to Cygwin format. Requires Cygwin installed in a +# a wine environment, working winepath, and LT_CYGPATH set. Returns result in +# func_to_host_file_result. +func_convert_path_nix_to_cygwin () +{ + $opt_debug + func_to_host_path_result="$1" + if test -n "$1"; then + # Remove leading and trailing path separator characters from + # ARG. msys behavior is inconsistent here, cygpath turns them + # into '.;' and ';.', and winepath ignores them completely. + func_stripname : : "$1" + func_to_host_path_tmp1=$func_stripname_result + func_convert_core_path_wine_to_w32 "$func_to_host_path_tmp1" + func_cygpath -u -p "$func_convert_core_path_wine_to_w32_result" + func_to_host_path_result="$func_cygpath_result" + func_convert_path_check : : \ + "$func_to_host_path_tmp1" "$func_to_host_path_result" + func_convert_path_front_back_pathsep ":*" "*:" : "$1" + fi +} +# end func_convert_path_nix_to_cygwin + + +# func_mode_compile arg... +func_mode_compile () +{ + $opt_debug + # Get the compilation command and the source file. + base_compile= + srcfile="$nonopt" # always keep a non-empty value in "srcfile" + suppress_opt=yes + suppress_output= + arg_mode=normal + libobj= + later= + pie_flag= + + for arg + do + case $arg_mode in + arg ) + # do not "continue". 
Instead, add this to base_compile + lastarg="$arg" + arg_mode=normal + ;; + + target ) + libobj="$arg" + arg_mode=normal + continue + ;; + + normal ) + # Accept any command-line options. + case $arg in + -o) + test -n "$libobj" && \ + func_fatal_error "you cannot specify \`-o' more than once" + arg_mode=target + continue + ;; + + -pie | -fpie | -fPIE) + func_append pie_flag " $arg" + continue + ;; + + -shared | -static | -prefer-pic | -prefer-non-pic) + func_append later " $arg" + continue + ;; + + -no-suppress) + suppress_opt=no + continue + ;; + + -Xcompiler) + arg_mode=arg # the next one goes into the "base_compile" arg list + continue # The current "srcfile" will either be retained or + ;; # replaced later. I would guess that would be a bug. + + -Wc,*) + func_stripname '-Wc,' '' "$arg" + args=$func_stripname_result + lastarg= + save_ifs="$IFS"; IFS=',' + for arg in $args; do + IFS="$save_ifs" + func_append_quoted lastarg "$arg" + done + IFS="$save_ifs" + func_stripname ' ' '' "$lastarg" + lastarg=$func_stripname_result + + # Add the arguments to base_compile. + func_append base_compile " $lastarg" + continue + ;; + + *) + # Accept the current argument as the source file. + # The previous "srcfile" becomes the current argument. + # + lastarg="$srcfile" + srcfile="$arg" + ;; + esac # case $arg + ;; + esac # case $arg_mode + + # Aesthetically quote the previous argument. + func_append_quoted base_compile "$lastarg" + done # for arg + + case $arg_mode in + arg) + func_fatal_error "you must specify an argument for -Xcompile" + ;; + target) + func_fatal_error "you must specify a target with \`-o'" + ;; + *) + # Get the name of the library object. + test -z "$libobj" && { + func_basename "$srcfile" + libobj="$func_basename_result" + } + ;; + esac + + # Recognize several different file suffixes. 
+ # If the user specifies -o file.o, it is replaced with file.lo + case $libobj in + *.[cCFSifmso] | \ + *.ada | *.adb | *.ads | *.asm | \ + *.c++ | *.cc | *.ii | *.class | *.cpp | *.cxx | \ + *.[fF][09]? | *.for | *.java | *.go | *.obj | *.sx | *.cu | *.cup) + func_xform "$libobj" + libobj=$func_xform_result + ;; + esac + + case $libobj in + *.lo) func_lo2o "$libobj"; obj=$func_lo2o_result ;; + *) + func_fatal_error "cannot determine name of library object from \`$libobj'" + ;; + esac + + func_infer_tag $base_compile + + for arg in $later; do + case $arg in + -shared) + test "$build_libtool_libs" != yes && \ + func_fatal_configuration "can not build a shared library" + build_old_libs=no + continue + ;; + + -static) + build_libtool_libs=no + build_old_libs=yes + continue + ;; + + -prefer-pic) + pic_mode=yes + continue + ;; + + -prefer-non-pic) + pic_mode=no + continue + ;; + esac + done + + func_quote_for_eval "$libobj" + test "X$libobj" != "X$func_quote_for_eval_result" \ + && $ECHO "X$libobj" | $GREP '[]~#^*{};<>?"'"'"' &()|`$[]' \ + && func_warning "libobj name \`$libobj' may not contain shell special characters." + func_dirname_and_basename "$obj" "/" "" + objname="$func_basename_result" + xdir="$func_dirname_result" + lobj=${xdir}$objdir/$objname + + test -z "$base_compile" && \ + func_fatal_help "you must specify a compilation command" + + # Delete any leftover library objects. 
+ if test "$build_old_libs" = yes; then + removelist="$obj $lobj $libobj ${libobj}T" + else + removelist="$lobj $libobj ${libobj}T" + fi + + # On Cygwin there's no "real" PIC flag so we must build both object types + case $host_os in + cygwin* | mingw* | pw32* | os2* | cegcc*) + pic_mode=default + ;; + esac + if test "$pic_mode" = no && test "$deplibs_check_method" != pass_all; then + # non-PIC code in shared libraries is not supported + pic_mode=default + fi + + # Calculate the filename of the output object if compiler does + # not support -o with -c + if test "$compiler_c_o" = no; then + output_obj=`$ECHO "$srcfile" | $SED 's%^.*/%%; s%\.[^.]*$%%'`.${objext} + lockfile="$output_obj.lock" + else + output_obj= + need_locks=no + lockfile= + fi + + # Lock this critical section if it is needed + # We use this script file to make the link, it avoids creating a new file + if test "$need_locks" = yes; then + until $opt_dry_run || ln "$progpath" "$lockfile" 2>/dev/null; do + func_echo "Waiting for $lockfile to be removed" + sleep 2 + done + elif test "$need_locks" = warn; then + if test -f "$lockfile"; then + $ECHO "\ +*** ERROR, $lockfile exists and contains: +`cat $lockfile 2>/dev/null` + +This indicates that another process is trying to use the same +temporary object file, and libtool could not work around it because +your compiler does not support \`-c' and \`-o' together. If you +repeat this compilation, it may succeed, by chance, but you had better +avoid parallel builds (make -j) in this platform, or get a better +compiler." 
+ + $opt_dry_run || $RM $removelist + exit $EXIT_FAILURE + fi + func_append removelist " $output_obj" + $ECHO "$srcfile" > "$lockfile" + fi + + $opt_dry_run || $RM $removelist + func_append removelist " $lockfile" + trap '$opt_dry_run || $RM $removelist; exit $EXIT_FAILURE' 1 2 15 + + func_to_tool_file "$srcfile" func_convert_file_msys_to_w32 + srcfile=$func_to_tool_file_result + func_quote_for_eval "$srcfile" + qsrcfile=$func_quote_for_eval_result + + # Only build a PIC object if we are building libtool libraries. + if test "$build_libtool_libs" = yes; then + # Without this assignment, base_compile gets emptied. + fbsd_hideous_sh_bug=$base_compile + + if test "$pic_mode" != no; then + command="$base_compile $qsrcfile $pic_flag" + else + # Don't build PIC code + command="$base_compile $qsrcfile" + fi + + func_mkdir_p "$xdir$objdir" + + if test -z "$output_obj"; then + # Place PIC objects in $objdir + func_append command " -o $lobj" + fi + + func_show_eval_locale "$command" \ + 'test -n "$output_obj" && $RM $removelist; exit $EXIT_FAILURE' + + if test "$need_locks" = warn && + test "X`cat $lockfile 2>/dev/null`" != "X$srcfile"; then + $ECHO "\ +*** ERROR, $lockfile contains: +`cat $lockfile 2>/dev/null` + +but it should contain: +$srcfile + +This indicates that another process is trying to use the same +temporary object file, and libtool could not work around it because +your compiler does not support \`-c' and \`-o' together. If you +repeat this compilation, it may succeed, by chance, but you had better +avoid parallel builds (make -j) in this platform, or get a better +compiler." + + $opt_dry_run || $RM $removelist + exit $EXIT_FAILURE + fi + + # Just move the object if needed, then go on to compile the next one + if test -n "$output_obj" && test "X$output_obj" != "X$lobj"; then + func_show_eval '$MV "$output_obj" "$lobj"' \ + 'error=$?; $opt_dry_run || $RM $removelist; exit $error' + fi + + # Allow error messages only from the first compilation. 
+ if test "$suppress_opt" = yes; then + suppress_output=' >/dev/null 2>&1' + fi + fi + + # Only build a position-dependent object if we build old libraries. + if test "$build_old_libs" = yes; then + if test "$pic_mode" != yes; then + # Don't build PIC code + command="$base_compile $qsrcfile$pie_flag" + else + command="$base_compile $qsrcfile $pic_flag" + fi + if test "$compiler_c_o" = yes; then + func_append command " -o $obj" + fi + + # Suppress compiler output if we already did a PIC compilation. + func_append command "$suppress_output" + func_show_eval_locale "$command" \ + '$opt_dry_run || $RM $removelist; exit $EXIT_FAILURE' + + if test "$need_locks" = warn && + test "X`cat $lockfile 2>/dev/null`" != "X$srcfile"; then + $ECHO "\ +*** ERROR, $lockfile contains: +`cat $lockfile 2>/dev/null` + +but it should contain: +$srcfile + +This indicates that another process is trying to use the same +temporary object file, and libtool could not work around it because +your compiler does not support \`-c' and \`-o' together. If you +repeat this compilation, it may succeed, by chance, but you had better +avoid parallel builds (make -j) in this platform, or get a better +compiler." + + $opt_dry_run || $RM $removelist + exit $EXIT_FAILURE + fi + + # Just move the object if needed + if test -n "$output_obj" && test "X$output_obj" != "X$obj"; then + func_show_eval '$MV "$output_obj" "$obj"' \ + 'error=$?; $opt_dry_run || $RM $removelist; exit $error' + fi + fi + + $opt_dry_run || { + func_write_libtool_object "$libobj" "$objdir/$objname" "$objname" + + # Unlock the critical section if it was locked + if test "$need_locks" != no; then + removelist=$lockfile + $RM "$lockfile" + fi + } + + exit $EXIT_SUCCESS +} + +$opt_help || { + test "$opt_mode" = compile && func_mode_compile ${1+"$@"} +} + +func_mode_help () +{ + # We need to display help for each of the modes. + case $opt_mode in + "") + # Generic help is extracted from the usage comments + # at the start of this file. 
+ func_help + ;; + + clean) + $ECHO \ +"Usage: $progname [OPTION]... --mode=clean RM [RM-OPTION]... FILE... + +Remove files from the build directory. + +RM is the name of the program to use to delete files associated with each FILE +(typically \`/bin/rm'). RM-OPTIONS are options (such as \`-f') to be passed +to RM. + +If FILE is a libtool library, object or program, all the files associated +with it are deleted. Otherwise, only FILE itself is deleted using RM." + ;; + + compile) + $ECHO \ +"Usage: $progname [OPTION]... --mode=compile COMPILE-COMMAND... SOURCEFILE + +Compile a source file into a libtool library object. + +This mode accepts the following additional options: + + -o OUTPUT-FILE set the output file name to OUTPUT-FILE + -no-suppress do not suppress compiler output for multiple passes + -prefer-pic try to build PIC objects only + -prefer-non-pic try to build non-PIC objects only + -shared do not build a \`.o' file suitable for static linking + -static only build a \`.o' file suitable for static linking + -Wc,FLAG pass FLAG directly to the compiler + +COMPILE-COMMAND is a command to be used in creating a \`standard' object file +from the given SOURCEFILE. + +The output file name is determined by removing the directory component from +SOURCEFILE, then substituting the C source code suffix \`.c' with the +library object suffix, \`.lo'." + ;; + + execute) + $ECHO \ +"Usage: $progname [OPTION]... --mode=execute COMMAND [ARGS]... + +Automatically set library path, then run a program. + +This mode accepts the following additional options: + + -dlopen FILE add the directory containing FILE to the library path + +This mode sets the library path environment variable according to \`-dlopen' +flags. + +If any of the ARGS are libtool executable wrappers, then they are translated +into their corresponding uninstalled binary, and any of their required library +directories are added to the library path. + +Then, COMMAND is executed, with ARGS as arguments." 
+ ;; + + finish) + $ECHO \ +"Usage: $progname [OPTION]... --mode=finish [LIBDIR]... + +Complete the installation of libtool libraries. + +Each LIBDIR is a directory that contains libtool libraries. + +The commands that this mode executes may require superuser privileges. Use +the \`--dry-run' option if you just want to see what would be executed." + ;; + + install) + $ECHO \ +"Usage: $progname [OPTION]... --mode=install INSTALL-COMMAND... + +Install executables or libraries. + +INSTALL-COMMAND is the installation command. The first component should be +either the \`install' or \`cp' program. + +The following components of INSTALL-COMMAND are treated specially: + + -inst-prefix-dir PREFIX-DIR Use PREFIX-DIR as a staging area for installation + +The rest of the components are interpreted as arguments to that command (only +BSD-compatible install options are recognized)." + ;; + + link) + $ECHO \ +"Usage: $progname [OPTION]... --mode=link LINK-COMMAND... + +Link object files or libraries together to form another library, or to +create an executable program. + +LINK-COMMAND is a command using the C compiler that you would use to create +a program from several object files. 
+ +The following components of LINK-COMMAND are treated specially: + + -all-static do not do any dynamic linking at all + -avoid-version do not add a version suffix if possible + -bindir BINDIR specify path to binaries directory (for systems where + libraries must be found in the PATH setting at runtime) + -dlopen FILE \`-dlpreopen' FILE if it cannot be dlopened at runtime + -dlpreopen FILE link in FILE and add its symbols to lt_preloaded_symbols + -export-dynamic allow symbols from OUTPUT-FILE to be resolved with dlsym(3) + -export-symbols SYMFILE + try to export only the symbols listed in SYMFILE + -export-symbols-regex REGEX + try to export only the symbols matching REGEX + -LLIBDIR search LIBDIR for required installed libraries + -lNAME OUTPUT-FILE requires the installed library libNAME + -module build a library that can dlopened + -no-fast-install disable the fast-install mode + -no-install link a not-installable executable + -no-undefined declare that a library does not refer to external symbols + -o OUTPUT-FILE create OUTPUT-FILE from the specified objects + -objectlist FILE Use a list of object files found in FILE to specify objects + -precious-files-regex REGEX + don't remove output files matching REGEX + -release RELEASE specify package release information + -rpath LIBDIR the created library will eventually be installed in LIBDIR + -R[ ]LIBDIR add LIBDIR to the runtime path of programs and libraries + -shared only do dynamic linking of libtool libraries + -shrext SUFFIX override the standard shared library file extension + -static do not do any dynamic linking of uninstalled libtool libraries + -static-libtool-libs + do not do any dynamic linking of libtool libraries + -version-info CURRENT[:REVISION[:AGE]] + specify library version info [each variable defaults to 0] + -weak LIBNAME declare that the target provides the LIBNAME interface + -Wc,FLAG + -Xcompiler FLAG pass linker-specific FLAG directly to the compiler + -Wl,FLAG + -Xlinker FLAG pass 
linker-specific FLAG directly to the linker + -XCClinker FLAG pass link-specific FLAG to the compiler driver (CC) + +All other options (arguments beginning with \`-') are ignored. + +Every other argument is treated as a filename. Files ending in \`.la' are +treated as uninstalled libtool libraries, other files are standard or library +object files. + +If the OUTPUT-FILE ends in \`.la', then a libtool library is created, +only library objects (\`.lo' files) may be specified, and \`-rpath' is +required, except when creating a convenience library. + +If OUTPUT-FILE ends in \`.a' or \`.lib', then a standard library is created +using \`ar' and \`ranlib', or on Windows using \`lib'. + +If OUTPUT-FILE ends in \`.lo' or \`.${objext}', then a reloadable object file +is created, otherwise an executable program is created." + ;; + + uninstall) + $ECHO \ +"Usage: $progname [OPTION]... --mode=uninstall RM [RM-OPTION]... FILE... + +Remove libraries from an installation directory. + +RM is the name of the program to use to delete files associated with each FILE +(typically \`/bin/rm'). RM-OPTIONS are options (such as \`-f') to be passed +to RM. + +If FILE is a libtool library, all the files associated with it are deleted. +Otherwise, only FILE itself is deleted using RM." + ;; + + *) + func_fatal_help "invalid operation mode \`$opt_mode'" + ;; + esac + + echo + $ECHO "Try \`$progname --help' for more information about other modes." 
+} + +# Now that we've collected a possible --mode arg, show help if necessary +if $opt_help; then + if test "$opt_help" = :; then + func_mode_help + else + { + func_help noexit + for opt_mode in compile link execute install finish uninstall clean; do + func_mode_help + done + } | sed -n '1p; 2,$s/^Usage:/ or: /p' + { + func_help noexit + for opt_mode in compile link execute install finish uninstall clean; do + echo + func_mode_help + done + } | + sed '1d + /^When reporting/,/^Report/{ + H + d + } + $x + /information about other modes/d + /more detailed .*MODE/d + s/^Usage:.*--mode=\([^ ]*\) .*/Description of \1 mode:/' + fi + exit $? +fi + + +# func_mode_execute arg... +func_mode_execute () +{ + $opt_debug + # The first argument is the command name. + cmd="$nonopt" + test -z "$cmd" && \ + func_fatal_help "you must specify a COMMAND" + + # Handle -dlopen flags immediately. + for file in $opt_dlopen; do + test -f "$file" \ + || func_fatal_help "\`$file' is not a file" + + dir= + case $file in + *.la) + func_resolve_sysroot "$file" + file=$func_resolve_sysroot_result + + # Check to see that this really is a libtool archive. + func_lalib_unsafe_p "$file" \ + || func_fatal_help "\`$lib' is not a valid libtool archive" + + # Read the libtool library. + dlname= + library_names= + func_source "$file" + + # Skip this library if it cannot be dlopened. + if test -z "$dlname"; then + # Warn if it was a shared library. + test -n "$library_names" && \ + func_warning "\`$file' was not linked with \`-export-dynamic'" + continue + fi + + func_dirname "$file" "" "." + dir="$func_dirname_result" + + if test -f "$dir/$objdir/$dlname"; then + func_append dir "/$objdir" + else + if test ! -f "$dir/$dlname"; then + func_fatal_error "cannot find \`$dlname' in \`$dir' or \`$dir/$objdir'" + fi + fi + ;; + + *.lo) + # Just add the directory containing the .lo file. + func_dirname "$file" "" "." 
+ dir="$func_dirname_result" + ;; + + *) + func_warning "\`-dlopen' is ignored for non-libtool libraries and objects" + continue + ;; + esac + + # Get the absolute pathname. + absdir=`cd "$dir" && pwd` + test -n "$absdir" && dir="$absdir" + + # Now add the directory to shlibpath_var. + if eval "test -z \"\$$shlibpath_var\""; then + eval "$shlibpath_var=\"\$dir\"" + else + eval "$shlibpath_var=\"\$dir:\$$shlibpath_var\"" + fi + done + + # This variable tells wrapper scripts just to set shlibpath_var + # rather than running their programs. + libtool_execute_magic="$magic" + + # Check if any of the arguments is a wrapper script. + args= + for file + do + case $file in + -* | *.la | *.lo ) ;; + *) + # Do a test to see if this is really a libtool program. + if func_ltwrapper_script_p "$file"; then + func_source "$file" + # Transform arg to wrapped name. + file="$progdir/$program" + elif func_ltwrapper_executable_p "$file"; then + func_ltwrapper_scriptname "$file" + func_source "$func_ltwrapper_scriptname_result" + # Transform arg to wrapped name. + file="$progdir/$program" + fi + ;; + esac + # Quote arguments (to preserve shell metacharacters). + func_append_quoted args "$file" + done + + if test "X$opt_dry_run" = Xfalse; then + if test -n "$shlibpath_var"; then + # Export the shlibpath_var. + eval "export $shlibpath_var" + fi + + # Restore saved environment variables + for lt_var in LANG LANGUAGE LC_ALL LC_CTYPE LC_COLLATE LC_MESSAGES + do + eval "if test \"\${save_$lt_var+set}\" = set; then + $lt_var=\$save_$lt_var; export $lt_var + else + $lt_unset $lt_var + fi" + done + + # Now prepare to actually exec the command. + exec_cmd="\$cmd$args" + else + # Display what would be done. + if test -n "$shlibpath_var"; then + eval "\$ECHO \"\$shlibpath_var=\$$shlibpath_var\"" + echo "export $shlibpath_var" + fi + $ECHO "$cmd$args" + exit $EXIT_SUCCESS + fi +} + +test "$opt_mode" = execute && func_mode_execute ${1+"$@"} + + +# func_mode_finish arg... 
+func_mode_finish () +{ + $opt_debug + libs= + libdirs= + admincmds= + + for opt in "$nonopt" ${1+"$@"} + do + if test -d "$opt"; then + func_append libdirs " $opt" + + elif test -f "$opt"; then + if func_lalib_unsafe_p "$opt"; then + func_append libs " $opt" + else + func_warning "\`$opt' is not a valid libtool archive" + fi + + else + func_fatal_error "invalid argument \`$opt'" + fi + done + + if test -n "$libs"; then + if test -n "$lt_sysroot"; then + sysroot_regex=`$ECHO "$lt_sysroot" | $SED "$sed_make_literal_regex"` + sysroot_cmd="s/\([ ']\)$sysroot_regex/\1/g;" + else + sysroot_cmd= + fi + + # Remove sysroot references + if $opt_dry_run; then + for lib in $libs; do + echo "removing references to $lt_sysroot and \`=' prefixes from $lib" + done + else + tmpdir=`func_mktempdir` + for lib in $libs; do + sed -e "${sysroot_cmd} s/\([ ']-[LR]\)=/\1/g; s/\([ ']\)=/\1/g" $lib \ + > $tmpdir/tmp-la + mv -f $tmpdir/tmp-la $lib + done + ${RM}r "$tmpdir" + fi + fi + + if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then + for libdir in $libdirs; do + if test -n "$finish_cmds"; then + # Do each command in the finish commands. + func_execute_cmds "$finish_cmds" 'admincmds="$admincmds +'"$cmd"'"' + fi + if test -n "$finish_eval"; then + # Do the single finish_eval. + eval cmds=\"$finish_eval\" + $opt_dry_run || eval "$cmds" || func_append admincmds " + $cmds" + fi + done + fi + + # Exit here if they wanted silent mode. 
+ $opt_silent && exit $EXIT_SUCCESS + + if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then + echo "----------------------------------------------------------------------" + echo "Libraries have been installed in:" + for libdir in $libdirs; do + $ECHO " $libdir" + done + echo + echo "If you ever happen to want to link against installed libraries" + echo "in a given directory, LIBDIR, you must either use libtool, and" + echo "specify the full pathname of the library, or use the \`-LLIBDIR'" + echo "flag during linking and do at least one of the following:" + if test -n "$shlibpath_var"; then + echo " - add LIBDIR to the \`$shlibpath_var' environment variable" + echo " during execution" + fi + if test -n "$runpath_var"; then + echo " - add LIBDIR to the \`$runpath_var' environment variable" + echo " during linking" + fi + if test -n "$hardcode_libdir_flag_spec"; then + libdir=LIBDIR + eval flag=\"$hardcode_libdir_flag_spec\" + + $ECHO " - use the \`$flag' linker flag" + fi + if test -n "$admincmds"; then + $ECHO " - have your system administrator run these commands:$admincmds" + fi + if test -f /etc/ld.so.conf; then + echo " - have your system administrator add LIBDIR to \`/etc/ld.so.conf'" + fi + echo + + echo "See any operating system documentation about shared libraries for" + case $host in + solaris2.[6789]|solaris2.1[0-9]) + echo "more information, such as the ld(1), crle(1) and ld.so(8) manual" + echo "pages." + ;; + *) + echo "more information, such as the ld(1) and ld.so(8) manual pages." + ;; + esac + echo "----------------------------------------------------------------------" + fi + exit $EXIT_SUCCESS +} + +test "$opt_mode" = finish && func_mode_finish ${1+"$@"} + + +# func_mode_install arg... +func_mode_install () +{ + $opt_debug + # There may be an optional sh(1) argument at the beginning of + # install_prog (especially on Windows NT). 
+ if test "$nonopt" = "$SHELL" || test "$nonopt" = /bin/sh || + # Allow the use of GNU shtool's install command. + case $nonopt in *shtool*) :;; *) false;; esac; then + # Aesthetically quote it. + func_quote_for_eval "$nonopt" + install_prog="$func_quote_for_eval_result " + arg=$1 + shift + else + install_prog= + arg=$nonopt + fi + + # The real first argument should be the name of the installation program. + # Aesthetically quote it. + func_quote_for_eval "$arg" + func_append install_prog "$func_quote_for_eval_result" + install_shared_prog=$install_prog + case " $install_prog " in + *[\\\ /]cp\ *) install_cp=: ;; + *) install_cp=false ;; + esac + + # We need to accept at least all the BSD install flags. + dest= + files= + opts= + prev= + install_type= + isdir=no + stripme= + no_mode=: + for arg + do + arg2= + if test -n "$dest"; then + func_append files " $dest" + dest=$arg + continue + fi + + case $arg in + -d) isdir=yes ;; + -f) + if $install_cp; then :; else + prev=$arg + fi + ;; + -g | -m | -o) + prev=$arg + ;; + -s) + stripme=" -s" + continue + ;; + -*) + ;; + *) + # If the previous option needed an argument, then skip it. + if test -n "$prev"; then + if test "x$prev" = x-m && test -n "$install_override_mode"; then + arg2=$install_override_mode + no_mode=false + fi + prev= + else + dest=$arg + continue + fi + ;; + esac + + # Aesthetically quote the argument. 
+ func_quote_for_eval "$arg" + func_append install_prog " $func_quote_for_eval_result" + if test -n "$arg2"; then + func_quote_for_eval "$arg2" + fi + func_append install_shared_prog " $func_quote_for_eval_result" + done + + test -z "$install_prog" && \ + func_fatal_help "you must specify an install program" + + test -n "$prev" && \ + func_fatal_help "the \`$prev' option requires an argument" + + if test -n "$install_override_mode" && $no_mode; then + if $install_cp; then :; else + func_quote_for_eval "$install_override_mode" + func_append install_shared_prog " -m $func_quote_for_eval_result" + fi + fi + + if test -z "$files"; then + if test -z "$dest"; then + func_fatal_help "no file or destination specified" + else + func_fatal_help "you must specify a destination" + fi + fi + + # Strip any trailing slash from the destination. + func_stripname '' '/' "$dest" + dest=$func_stripname_result + + # Check to see that the destination is a directory. + test -d "$dest" && isdir=yes + if test "$isdir" = yes; then + destdir="$dest" + destname= + else + func_dirname_and_basename "$dest" "" "." + destdir="$func_dirname_result" + destname="$func_basename_result" + + # Not a directory, so check to see that there is only one file specified. + set dummy $files; shift + test "$#" -gt 1 && \ + func_fatal_help "\`$dest' is not a directory" + fi + case $destdir in + [\\/]* | [A-Za-z]:[\\/]*) ;; + *) + for file in $files; do + case $file in + *.lo) ;; + *) + func_fatal_help "\`$destdir' must be an absolute directory name" + ;; + esac + done + ;; + esac + + # This variable tells wrapper scripts just to set variables rather + # than running their programs. + libtool_install_magic="$magic" + + staticlibs= + future_libdirs= + current_libdirs= + for file in $files; do + + # Do each installation. + case $file in + *.$libext) + # Do the static libraries later. 
+ func_append staticlibs " $file" + ;; + + *.la) + func_resolve_sysroot "$file" + file=$func_resolve_sysroot_result + + # Check to see that this really is a libtool archive. + func_lalib_unsafe_p "$file" \ + || func_fatal_help "\`$file' is not a valid libtool archive" + + library_names= + old_library= + relink_command= + func_source "$file" + + # Add the libdir to current_libdirs if it is the destination. + if test "X$destdir" = "X$libdir"; then + case "$current_libdirs " in + *" $libdir "*) ;; + *) func_append current_libdirs " $libdir" ;; + esac + else + # Note the libdir as a future libdir. + case "$future_libdirs " in + *" $libdir "*) ;; + *) func_append future_libdirs " $libdir" ;; + esac + fi + + func_dirname "$file" "/" "" + dir="$func_dirname_result" + func_append dir "$objdir" + + if test -n "$relink_command"; then + # Determine the prefix the user has applied to our future dir. + inst_prefix_dir=`$ECHO "$destdir" | $SED -e "s%$libdir\$%%"` + + # Don't allow the user to place us outside of our expected + # location b/c this prevents finding dependent libraries that + # are installed to the same prefix. + # At present, this check doesn't affect windows .dll's that + # are installed into $libdir/../bin (currently, that works fine) + # but it's something to keep an eye on. + test "$inst_prefix_dir" = "$destdir" && \ + func_fatal_error "error: cannot install \`$file' to a directory not ending in $libdir" + + if test -n "$inst_prefix_dir"; then + # Stick the inst_prefix_dir data into the link command. + relink_command=`$ECHO "$relink_command" | $SED "s%@inst_prefix_dir@%-inst-prefix-dir $inst_prefix_dir%"` + else + relink_command=`$ECHO "$relink_command" | $SED "s%@inst_prefix_dir@%%"` + fi + + func_warning "relinking \`$file'" + func_show_eval "$relink_command" \ + 'func_fatal_error "error: relink \`$file'\'' with the above command before installing it"' + fi + + # See the names of the shared library. 
+ set dummy $library_names; shift + if test -n "$1"; then + realname="$1" + shift + + srcname="$realname" + test -n "$relink_command" && srcname="$realname"T + + # Install the shared library and build the symlinks. + func_show_eval "$install_shared_prog $dir/$srcname $destdir/$realname" \ + 'exit $?' + tstripme="$stripme" + case $host_os in + cygwin* | mingw* | pw32* | cegcc*) + case $realname in + *.dll.a) + tstripme="" + ;; + esac + ;; + esac + if test -n "$tstripme" && test -n "$striplib"; then + func_show_eval "$striplib $destdir/$realname" 'exit $?' + fi + + if test "$#" -gt 0; then + # Delete the old symlinks, and create new ones. + # Try `ln -sf' first, because the `ln' binary might depend on + # the symlink we replace! Solaris /bin/ln does not understand -f, + # so we also need to try rm && ln -s. + for linkname + do + test "$linkname" != "$realname" \ + && func_show_eval "(cd $destdir && { $LN_S -f $realname $linkname || { $RM $linkname && $LN_S $realname $linkname; }; })" + done + fi + + # Do each command in the postinstall commands. + lib="$destdir/$realname" + func_execute_cmds "$postinstall_cmds" 'exit $?' + fi + + # Install the pseudo-library for information purposes. + func_basename "$file" + name="$func_basename_result" + instname="$dir/$name"i + func_show_eval "$install_prog $instname $destdir/$name" 'exit $?' + + # Maybe install the static library, too. + test -n "$old_library" && func_append staticlibs " $dir/$old_library" + ;; + + *.lo) + # Install (i.e. copy) a libtool object. + + # Figure out destination file name, if it wasn't already specified. + if test -n "$destname"; then + destfile="$destdir/$destname" + else + func_basename "$file" + destfile="$func_basename_result" + destfile="$destdir/$destfile" + fi + + # Deduce the name of the destination old-style object file. 
+ case $destfile in + *.lo) + func_lo2o "$destfile" + staticdest=$func_lo2o_result + ;; + *.$objext) + staticdest="$destfile" + destfile= + ;; + *) + func_fatal_help "cannot copy a libtool object to \`$destfile'" + ;; + esac + + # Install the libtool object if requested. + test -n "$destfile" && \ + func_show_eval "$install_prog $file $destfile" 'exit $?' + + # Install the old object if enabled. + if test "$build_old_libs" = yes; then + # Deduce the name of the old-style object file. + func_lo2o "$file" + staticobj=$func_lo2o_result + func_show_eval "$install_prog \$staticobj \$staticdest" 'exit $?' + fi + exit $EXIT_SUCCESS + ;; + + *) + # Figure out destination file name, if it wasn't already specified. + if test -n "$destname"; then + destfile="$destdir/$destname" + else + func_basename "$file" + destfile="$func_basename_result" + destfile="$destdir/$destfile" + fi + + # If the file is missing, and there is a .exe on the end, strip it + # because it is most likely a libtool script we actually want to + # install + stripped_ext="" + case $file in + *.exe) + if test ! -f "$file"; then + func_stripname '' '.exe' "$file" + file=$func_stripname_result + stripped_ext=".exe" + fi + ;; + esac + + # Do a test to see if this is really a libtool program. + case $host in + *cygwin* | *mingw*) + if func_ltwrapper_executable_p "$file"; then + func_ltwrapper_scriptname "$file" + wrapper=$func_ltwrapper_scriptname_result + else + func_stripname '' '.exe' "$file" + wrapper=$func_stripname_result + fi + ;; + *) + wrapper=$file + ;; + esac + if func_ltwrapper_script_p "$wrapper"; then + notinst_deplibs= + relink_command= + + func_source "$wrapper" + + # Check the variables that should have been set. + test -z "$generated_by_libtool_version" && \ + func_fatal_error "invalid libtool wrapper script \`$wrapper'" + + finalize=yes + for lib in $notinst_deplibs; do + # Check to see that each library is installed. 
+ libdir= + if test -f "$lib"; then + func_source "$lib" + fi + libfile="$libdir/"`$ECHO "$lib" | $SED 's%^.*/%%g'` ### testsuite: skip nested quoting test + if test -n "$libdir" && test ! -f "$libfile"; then + func_warning "\`$lib' has not been installed in \`$libdir'" + finalize=no + fi + done + + relink_command= + func_source "$wrapper" + + outputname= + if test "$fast_install" = no && test -n "$relink_command"; then + $opt_dry_run || { + if test "$finalize" = yes; then + tmpdir=`func_mktempdir` + func_basename "$file$stripped_ext" + file="$func_basename_result" + outputname="$tmpdir/$file" + # Replace the output file specification. + relink_command=`$ECHO "$relink_command" | $SED 's%@OUTPUT@%'"$outputname"'%g'` + + $opt_silent || { + func_quote_for_expand "$relink_command" + eval "func_echo $func_quote_for_expand_result" + } + if eval "$relink_command"; then : + else + func_error "error: relink \`$file' with the above command before installing it" + $opt_dry_run || ${RM}r "$tmpdir" + continue + fi + file="$outputname" + else + func_warning "cannot relink \`$file'" + fi + } + else + # Install the binary that we compiled earlier. + file=`$ECHO "$file$stripped_ext" | $SED "s%\([^/]*\)$%$objdir/\1%"` + fi + fi + + # remove .exe since cygwin /usr/bin/install will append another + # one anyway + case $install_prog,$host in + */usr/bin/install*,*cygwin*) + case $file:$destfile in + *.exe:*.exe) + # this is ok + ;; + *.exe:*) + destfile=$destfile.exe + ;; + *:*.exe) + func_stripname '' '.exe' "$destfile" + destfile=$func_stripname_result + ;; + esac + ;; + esac + func_show_eval "$install_prog\$stripme \$file \$destfile" 'exit $?' + $opt_dry_run || if test -n "$outputname"; then + ${RM}r "$tmpdir" + fi + ;; + esac + done + + for file in $staticlibs; do + func_basename "$file" + name="$func_basename_result" + + # Set up the ranlib parameters. 
+ oldlib="$destdir/$name" + func_to_tool_file "$oldlib" func_convert_file_msys_to_w32 + tool_oldlib=$func_to_tool_file_result + + func_show_eval "$install_prog \$file \$oldlib" 'exit $?' + + if test -n "$stripme" && test -n "$old_striplib"; then + func_show_eval "$old_striplib $tool_oldlib" 'exit $?' + fi + + # Do each command in the postinstall commands. + func_execute_cmds "$old_postinstall_cmds" 'exit $?' + done + + test -n "$future_libdirs" && \ + func_warning "remember to run \`$progname --finish$future_libdirs'" + + if test -n "$current_libdirs"; then + # Maybe just do a dry run. + $opt_dry_run && current_libdirs=" -n$current_libdirs" + exec_cmd='$SHELL $progpath $preserve_args --finish$current_libdirs' + else + exit $EXIT_SUCCESS + fi +} + +test "$opt_mode" = install && func_mode_install ${1+"$@"} + + +# func_generate_dlsyms outputname originator pic_p +# Extract symbols from dlprefiles and create ${outputname}S.o with +# a dlpreopen symbol table. +func_generate_dlsyms () +{ + $opt_debug + my_outputname="$1" + my_originator="$2" + my_pic_p="${3-no}" + my_prefix=`$ECHO "$my_originator" | sed 's%[^a-zA-Z0-9]%_%g'` + my_dlsyms= + + if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then + if test -n "$NM" && test -n "$global_symbol_pipe"; then + my_dlsyms="${my_outputname}S.c" + else + func_error "not configured to extract global symbols from dlpreopened files" + fi + fi + + if test -n "$my_dlsyms"; then + case $my_dlsyms in + "") ;; + *.c) + # Discover the nlist of each of the dlfiles. + nlist="$output_objdir/${my_outputname}.nm" + + func_show_eval "$RM $nlist ${nlist}S ${nlist}T" + + # Parse the name list into a source file. + func_verbose "creating $output_objdir/$my_dlsyms" + + $opt_dry_run || $ECHO > "$output_objdir/$my_dlsyms" "\ +/* $my_dlsyms - symbol resolution table for \`$my_outputname' dlsym emulation. 
*/ +/* Generated by $PROGRAM (GNU $PACKAGE$TIMESTAMP) $VERSION */ + +#ifdef __cplusplus +extern \"C\" { +#endif + +#if defined(__GNUC__) && (((__GNUC__ == 4) && (__GNUC_MINOR__ >= 4)) || (__GNUC__ > 4)) +#pragma GCC diagnostic ignored \"-Wstrict-prototypes\" +#endif + +/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */ +#if defined(_WIN32) || defined(__CYGWIN__) || defined(_WIN32_WCE) +/* DATA imports from DLLs on WIN32 con't be const, because runtime + relocations are performed -- see ld's documentation on pseudo-relocs. */ +# define LT_DLSYM_CONST +#elif defined(__osf__) +/* This system does not cope well with relocations in const data. */ +# define LT_DLSYM_CONST +#else +# define LT_DLSYM_CONST const +#endif + +/* External symbol declarations for the compiler. */\ +" + + if test "$dlself" = yes; then + func_verbose "generating symbol list for \`$output'" + + $opt_dry_run || echo ': @PROGRAM@ ' > "$nlist" + + # Add our own program objects to the symbol list. 
+ progfiles=`$ECHO "$objs$old_deplibs" | $SP2NL | $SED "$lo2o" | $NL2SP` + for progfile in $progfiles; do + func_to_tool_file "$progfile" func_convert_file_msys_to_w32 + func_verbose "extracting global C symbols from \`$func_to_tool_file_result'" + $opt_dry_run || eval "$NM $func_to_tool_file_result | $global_symbol_pipe >> '$nlist'" + done + + if test -n "$exclude_expsyms"; then + $opt_dry_run || { + eval '$EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T' + eval '$MV "$nlist"T "$nlist"' + } + fi + + if test -n "$export_symbols_regex"; then + $opt_dry_run || { + eval '$EGREP -e "$export_symbols_regex" "$nlist" > "$nlist"T' + eval '$MV "$nlist"T "$nlist"' + } + fi + + # Prepare the list of exported symbols + if test -z "$export_symbols"; then + export_symbols="$output_objdir/$outputname.exp" + $opt_dry_run || { + $RM $export_symbols + eval "${SED} -n -e '/^: @PROGRAM@ $/d' -e 's/^.* \(.*\)$/\1/p' "'< "$nlist" > "$export_symbols"' + case $host in + *cygwin* | *mingw* | *cegcc* ) + eval "echo EXPORTS "'> "$output_objdir/$outputname.def"' + eval 'cat "$export_symbols" >> "$output_objdir/$outputname.def"' + ;; + esac + } + else + $opt_dry_run || { + eval "${SED} -e 's/\([].[*^$]\)/\\\\\1/g' -e 's/^/ /' -e 's/$/$/'"' < "$export_symbols" > "$output_objdir/$outputname.exp"' + eval '$GREP -f "$output_objdir/$outputname.exp" < "$nlist" > "$nlist"T' + eval '$MV "$nlist"T "$nlist"' + case $host in + *cygwin* | *mingw* | *cegcc* ) + eval "echo EXPORTS "'> "$output_objdir/$outputname.def"' + eval 'cat "$nlist" >> "$output_objdir/$outputname.def"' + ;; + esac + } + fi + fi + + for dlprefile in $dlprefiles; do + func_verbose "extracting global C symbols from \`$dlprefile'" + func_basename "$dlprefile" + name="$func_basename_result" + case $host in + *cygwin* | *mingw* | *cegcc* ) + # if an import library, we need to obtain dlname + if func_win32_import_lib_p "$dlprefile"; then + func_tr_sh "$dlprefile" + eval "curr_lafile=\$libfile_$func_tr_sh_result" + 
dlprefile_dlbasename="" + if test -n "$curr_lafile" && func_lalib_p "$curr_lafile"; then + # Use subshell, to avoid clobbering current variable values + dlprefile_dlname=`source "$curr_lafile" && echo "$dlname"` + if test -n "$dlprefile_dlname" ; then + func_basename "$dlprefile_dlname" + dlprefile_dlbasename="$func_basename_result" + else + # no lafile. user explicitly requested -dlpreopen . + $sharedlib_from_linklib_cmd "$dlprefile" + dlprefile_dlbasename=$sharedlib_from_linklib_result + fi + fi + $opt_dry_run || { + if test -n "$dlprefile_dlbasename" ; then + eval '$ECHO ": $dlprefile_dlbasename" >> "$nlist"' + else + func_warning "Could not compute DLL name from $name" + eval '$ECHO ": $name " >> "$nlist"' + fi + func_to_tool_file "$dlprefile" func_convert_file_msys_to_w32 + eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe | + $SED -e '/I __imp/d' -e 's/I __nm_/D /;s/_nm__//' >> '$nlist'" + } + else # not an import lib + $opt_dry_run || { + eval '$ECHO ": $name " >> "$nlist"' + func_to_tool_file "$dlprefile" func_convert_file_msys_to_w32 + eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe >> '$nlist'" + } + fi + ;; + *) + $opt_dry_run || { + eval '$ECHO ": $name " >> "$nlist"' + func_to_tool_file "$dlprefile" func_convert_file_msys_to_w32 + eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe >> '$nlist'" + } + ;; + esac + done + + $opt_dry_run || { + # Make sure we have at least an empty file. + test -f "$nlist" || : > "$nlist" + + if test -n "$exclude_expsyms"; then + $EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T + $MV "$nlist"T "$nlist" + fi + + # Try sorting and uniquifying the output. 
+ if $GREP -v "^: " < "$nlist" | + if sort -k 3 /dev/null 2>&1; then + sort -k 3 + else + sort +2 + fi | + uniq > "$nlist"S; then + : + else + $GREP -v "^: " < "$nlist" > "$nlist"S + fi + + if test -f "$nlist"S; then + eval "$global_symbol_to_cdecl"' < "$nlist"S >> "$output_objdir/$my_dlsyms"' + else + echo '/* NONE */' >> "$output_objdir/$my_dlsyms" + fi + + echo >> "$output_objdir/$my_dlsyms" "\ + +/* The mapping between symbol names and symbols. */ +typedef struct { + const char *name; + void *address; +} lt_dlsymlist; +extern LT_DLSYM_CONST lt_dlsymlist +lt_${my_prefix}_LTX_preloaded_symbols[]; +LT_DLSYM_CONST lt_dlsymlist +lt_${my_prefix}_LTX_preloaded_symbols[] = +{\ + { \"$my_originator\", (void *) 0 }," + + case $need_lib_prefix in + no) + eval "$global_symbol_to_c_name_address" < "$nlist" >> "$output_objdir/$my_dlsyms" + ;; + *) + eval "$global_symbol_to_c_name_address_lib_prefix" < "$nlist" >> "$output_objdir/$my_dlsyms" + ;; + esac + echo >> "$output_objdir/$my_dlsyms" "\ + {0, (void *) 0} +}; + +/* This works around a problem in FreeBSD linker */ +#ifdef FREEBSD_WORKAROUND +static const void *lt_preloaded_setup() { + return lt_${my_prefix}_LTX_preloaded_symbols; +} +#endif + +#ifdef __cplusplus +} +#endif\ +" + } # !$opt_dry_run + + pic_flag_for_symtable= + case "$compile_command " in + *" -static "*) ;; + *) + case $host in + # compiling the symbol table file with pic_flag works around + # a FreeBSD bug that causes programs to crash when -lm is + # linked before any other PIC object. But we must not use + # pic_flag when linking with -static. The problem exists in + # FreeBSD 2.2.6 and is fixed in FreeBSD 3.1. 
+ *-*-freebsd2.*|*-*-freebsd3.0*|*-*-freebsdelf3.0*) + pic_flag_for_symtable=" $pic_flag -DFREEBSD_WORKAROUND" ;; + *-*-hpux*) + pic_flag_for_symtable=" $pic_flag" ;; + *) + if test "X$my_pic_p" != Xno; then + pic_flag_for_symtable=" $pic_flag" + fi + ;; + esac + ;; + esac + symtab_cflags= + for arg in $LTCFLAGS; do + case $arg in + -pie | -fpie | -fPIE) ;; + *) func_append symtab_cflags " $arg" ;; + esac + done + + # Now compile the dynamic symbol file. + func_show_eval '(cd $output_objdir && $LTCC$symtab_cflags -c$no_builtin_flag$pic_flag_for_symtable "$my_dlsyms")' 'exit $?' + + # Clean up the generated files. + func_show_eval '$RM "$output_objdir/$my_dlsyms" "$nlist" "${nlist}S" "${nlist}T"' + + # Transform the symbol file into the correct name. + symfileobj="$output_objdir/${my_outputname}S.$objext" + case $host in + *cygwin* | *mingw* | *cegcc* ) + if test -f "$output_objdir/$my_outputname.def"; then + compile_command=`$ECHO "$compile_command" | $SED "s%@SYMFILE@%$output_objdir/$my_outputname.def $symfileobj%"` + finalize_command=`$ECHO "$finalize_command" | $SED "s%@SYMFILE@%$output_objdir/$my_outputname.def $symfileobj%"` + else + compile_command=`$ECHO "$compile_command" | $SED "s%@SYMFILE@%$symfileobj%"` + finalize_command=`$ECHO "$finalize_command" | $SED "s%@SYMFILE@%$symfileobj%"` + fi + ;; + *) + compile_command=`$ECHO "$compile_command" | $SED "s%@SYMFILE@%$symfileobj%"` + finalize_command=`$ECHO "$finalize_command" | $SED "s%@SYMFILE@%$symfileobj%"` + ;; + esac + ;; + *) + func_fatal_error "unknown suffix for \`$my_dlsyms'" + ;; + esac + else + # We keep going just in case the user didn't refer to + # lt_preloaded_symbols. The linker will fail if global_symbol_pipe + # really was required. + + # Nullify the symbol file. 
+ compile_command=`$ECHO "$compile_command" | $SED "s% @SYMFILE@%%"` + finalize_command=`$ECHO "$finalize_command" | $SED "s% @SYMFILE@%%"` + fi +} + +# func_win32_libid arg +# return the library type of file 'arg' +# +# Need a lot of goo to handle *both* DLLs and import libs +# Has to be a shell function in order to 'eat' the argument +# that is supplied when $file_magic_command is called. +# Despite the name, also deal with 64 bit binaries. +func_win32_libid () +{ + $opt_debug + win32_libid_type="unknown" + win32_fileres=`file -L $1 2>/dev/null` + case $win32_fileres in + *ar\ archive\ import\ library*) # definitely import + win32_libid_type="x86 archive import" + ;; + *ar\ archive*) # could be an import, or static + # Keep the egrep pattern in sync with the one in _LT_CHECK_MAGIC_METHOD. + if eval $OBJDUMP -f $1 | $SED -e '10q' 2>/dev/null | + $EGREP 'file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' >/dev/null; then + func_to_tool_file "$1" func_convert_file_msys_to_w32 + win32_nmres=`eval $NM -f posix -A \"$func_to_tool_file_result\" | + $SED -n -e ' + 1,100{ + / I /{ + s,.*,import, + p + q + } + }'` + case $win32_nmres in + import*) win32_libid_type="x86 archive import";; + *) win32_libid_type="x86 archive static";; + esac + fi + ;; + *DLL*) + win32_libid_type="x86 DLL" + ;; + *executable*) # but shell scripts are "executable" too... + case $win32_fileres in + *MS\ Windows\ PE\ Intel*) + win32_libid_type="x86 DLL" + ;; + esac + ;; + esac + $ECHO "$win32_libid_type" +} + +# func_cygming_dll_for_implib ARG +# +# Platform-specific function to extract the +# name of the DLL associated with the specified +# import library ARG. 
+# Invoked by eval'ing the libtool variable +# $sharedlib_from_linklib_cmd +# Result is available in the variable +# $sharedlib_from_linklib_result +func_cygming_dll_for_implib () +{ + $opt_debug + sharedlib_from_linklib_result=`$DLLTOOL --identify-strict --identify "$1"` +} + +# func_cygming_dll_for_implib_fallback_core SECTION_NAME LIBNAMEs +# +# The is the core of a fallback implementation of a +# platform-specific function to extract the name of the +# DLL associated with the specified import library LIBNAME. +# +# SECTION_NAME is either .idata$6 or .idata$7, depending +# on the platform and compiler that created the implib. +# +# Echos the name of the DLL associated with the +# specified import library. +func_cygming_dll_for_implib_fallback_core () +{ + $opt_debug + match_literal=`$ECHO "$1" | $SED "$sed_make_literal_regex"` + $OBJDUMP -s --section "$1" "$2" 2>/dev/null | + $SED '/^Contents of section '"$match_literal"':/{ + # Place marker at beginning of archive member dllname section + s/.*/====MARK====/ + p + d + } + # These lines can sometimes be longer than 43 characters, but + # are always uninteresting + /:[ ]*file format pe[i]\{,1\}-/d + /^In archive [^:]*:/d + # Ensure marker is printed + /^====MARK====/p + # Remove all lines with less than 43 characters + /^.\{43\}/!d + # From remaining lines, remove first 43 characters + s/^.\{43\}//' | + $SED -n ' + # Join marker and all lines until next marker into a single line + /^====MARK====/ b para + H + $ b para + b + :para + x + s/\n//g + # Remove the marker + s/^====MARK====// + # Remove trailing dots and whitespace + s/[\. \t]*$// + # Print + /./p' | + # we now have a list, one entry per line, of the stringified + # contents of the appropriate section of all members of the + # archive which possess that section. Heuristic: eliminate + # all those which have a first or second character that is + # a '.' (that is, objdump's representation of an unprintable + # character.) 
This should work for all archives with less than + # 0x302f exports -- but will fail for DLLs whose name actually + # begins with a literal '.' or a single character followed by + # a '.'. + # + # Of those that remain, print the first one. + $SED -e '/^\./d;/^.\./d;q' +} + +# func_cygming_gnu_implib_p ARG +# This predicate returns with zero status (TRUE) if +# ARG is a GNU/binutils-style import library. Returns +# with nonzero status (FALSE) otherwise. +func_cygming_gnu_implib_p () +{ + $opt_debug + func_to_tool_file "$1" func_convert_file_msys_to_w32 + func_cygming_gnu_implib_tmp=`$NM "$func_to_tool_file_result" | eval "$global_symbol_pipe" | $EGREP ' (_head_[A-Za-z0-9_]+_[ad]l*|[A-Za-z0-9_]+_[ad]l*_iname)$'` + test -n "$func_cygming_gnu_implib_tmp" +} + +# func_cygming_ms_implib_p ARG +# This predicate returns with zero status (TRUE) if +# ARG is an MS-style import library. Returns +# with nonzero status (FALSE) otherwise. +func_cygming_ms_implib_p () +{ + $opt_debug + func_to_tool_file "$1" func_convert_file_msys_to_w32 + func_cygming_ms_implib_tmp=`$NM "$func_to_tool_file_result" | eval "$global_symbol_pipe" | $GREP '_NULL_IMPORT_DESCRIPTOR'` + test -n "$func_cygming_ms_implib_tmp" +} + +# func_cygming_dll_for_implib_fallback ARG +# Platform-specific function to extract the +# name of the DLL associated with the specified +# import library ARG. +# +# This fallback implementation is for use when $DLLTOOL +# does not support the --identify-strict option. 
+# Invoked by eval'ing the libtool variable +# $sharedlib_from_linklib_cmd +# Result is available in the variable +# $sharedlib_from_linklib_result +func_cygming_dll_for_implib_fallback () +{ + $opt_debug + if func_cygming_gnu_implib_p "$1" ; then + # binutils import library + sharedlib_from_linklib_result=`func_cygming_dll_for_implib_fallback_core '.idata$7' "$1"` + elif func_cygming_ms_implib_p "$1" ; then + # ms-generated import library + sharedlib_from_linklib_result=`func_cygming_dll_for_implib_fallback_core '.idata$6' "$1"` + else + # unknown + sharedlib_from_linklib_result="" + fi +} + + +# func_extract_an_archive dir oldlib +func_extract_an_archive () +{ + $opt_debug + f_ex_an_ar_dir="$1"; shift + f_ex_an_ar_oldlib="$1" + if test "$lock_old_archive_extraction" = yes; then + lockfile=$f_ex_an_ar_oldlib.lock + until $opt_dry_run || ln "$progpath" "$lockfile" 2>/dev/null; do + func_echo "Waiting for $lockfile to be removed" + sleep 2 + done + fi + func_show_eval "(cd \$f_ex_an_ar_dir && $AR x \"\$f_ex_an_ar_oldlib\")" \ + 'stat=$?; rm -f "$lockfile"; exit $stat' + if test "$lock_old_archive_extraction" = yes; then + $opt_dry_run || rm -f "$lockfile" + fi + if ($AR t "$f_ex_an_ar_oldlib" | sort | sort -uc >/dev/null 2>&1); then + : + else + func_fatal_error "object name conflicts in archive: $f_ex_an_ar_dir/$f_ex_an_ar_oldlib" + fi +} + + +# func_extract_archives gentop oldlib ... +func_extract_archives () +{ + $opt_debug + my_gentop="$1"; shift + my_oldlibs=${1+"$@"} + my_oldobjs="" + my_xlib="" + my_xabs="" + my_xdir="" + + for my_xlib in $my_oldlibs; do + # Extract the objects. 
+ case $my_xlib in + [\\/]* | [A-Za-z]:[\\/]*) my_xabs="$my_xlib" ;; + *) my_xabs=`pwd`"/$my_xlib" ;; + esac + func_basename "$my_xlib" + my_xlib="$func_basename_result" + my_xlib_u=$my_xlib + while :; do + case " $extracted_archives " in + *" $my_xlib_u "*) + func_arith $extracted_serial + 1 + extracted_serial=$func_arith_result + my_xlib_u=lt$extracted_serial-$my_xlib ;; + *) break ;; + esac + done + extracted_archives="$extracted_archives $my_xlib_u" + my_xdir="$my_gentop/$my_xlib_u" + + func_mkdir_p "$my_xdir" + + case $host in + *-darwin*) + func_verbose "Extracting $my_xabs" + # Do not bother doing anything if just a dry run + $opt_dry_run || { + darwin_orig_dir=`pwd` + cd $my_xdir || exit $? + darwin_archive=$my_xabs + darwin_curdir=`pwd` + darwin_base_archive=`basename "$darwin_archive"` + darwin_arches=`$LIPO -info "$darwin_archive" 2>/dev/null | $GREP Architectures 2>/dev/null || true` + if test -n "$darwin_arches"; then + darwin_arches=`$ECHO "$darwin_arches" | $SED -e 's/.*are://'` + darwin_arch= + func_verbose "$darwin_base_archive has multiple architectures $darwin_arches" + for darwin_arch in $darwin_arches ; do + func_mkdir_p "unfat-$$/${darwin_base_archive}-${darwin_arch}" + $LIPO -thin $darwin_arch -output "unfat-$$/${darwin_base_archive}-${darwin_arch}/${darwin_base_archive}" "${darwin_archive}" + cd "unfat-$$/${darwin_base_archive}-${darwin_arch}" + func_extract_an_archive "`pwd`" "${darwin_base_archive}" + cd "$darwin_curdir" + $RM "unfat-$$/${darwin_base_archive}-${darwin_arch}/${darwin_base_archive}" + done # $darwin_arches + ## Okay now we've a bunch of thin objects, gotta fatten them up :) + darwin_filelist=`find unfat-$$ -type f -name \*.o -print -o -name \*.lo -print | $SED -e "$basename" | sort -u` + darwin_file= + darwin_files= + for darwin_file in $darwin_filelist; do + darwin_files=`find unfat-$$ -name $darwin_file -print | sort | $NL2SP` + $LIPO -create -output "$darwin_file" $darwin_files + done # $darwin_filelist + $RM -rf unfat-$$ 
+ cd "$darwin_orig_dir" + else + cd $darwin_orig_dir + func_extract_an_archive "$my_xdir" "$my_xabs" + fi # $darwin_arches + } # !$opt_dry_run + ;; + *) + func_extract_an_archive "$my_xdir" "$my_xabs" + ;; + esac + my_oldobjs="$my_oldobjs "`find $my_xdir -name \*.$objext -print -o -name \*.lo -print | sort | $NL2SP` + done + + func_extract_archives_result="$my_oldobjs" +} + + +# func_emit_wrapper [arg=no] +# +# Emit a libtool wrapper script on stdout. +# Don't directly open a file because we may want to +# incorporate the script contents within a cygwin/mingw +# wrapper executable. Must ONLY be called from within +# func_mode_link because it depends on a number of variables +# set therein. +# +# ARG is the value that the WRAPPER_SCRIPT_BELONGS_IN_OBJDIR +# variable will take. If 'yes', then the emitted script +# will assume that the directory in which it is stored is +# the $objdir directory. This is a cygwin/mingw-specific +# behavior. +func_emit_wrapper () +{ + func_emit_wrapper_arg1=${1-no} + + $ECHO "\ +#! $SHELL + +# $output - temporary wrapper script for $objdir/$outputname +# Generated by $PROGRAM (GNU $PACKAGE$TIMESTAMP) $VERSION +# +# The $output program cannot be directly executed until all the libtool +# libraries that it depends on are installed. +# +# This wrapper script should never be moved out of the build directory. +# If it is, it will not operate correctly. + +# Sed substitution that helps us do robust quoting. It backslashifies +# metacharacters that are still active within double-quoted strings. +sed_quote_subst='$sed_quote_subst' + +# Be Bourne compatible +if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then + emulate sh + NULLCMD=: + # Zsh 3.x and 4.x performs word splitting on \${1+\"\$@\"}, which + # is contrary to our usage. Disable this feature. 
+ alias -g '\${1+\"\$@\"}'='\"\$@\"' + setopt NO_GLOB_SUBST +else + case \`(set -o) 2>/dev/null\` in *posix*) set -o posix;; esac +fi +BIN_SH=xpg4; export BIN_SH # for Tru64 +DUALCASE=1; export DUALCASE # for MKS sh + +# The HP-UX ksh and POSIX shell print the target directory to stdout +# if CDPATH is set. +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + +relink_command=\"$relink_command\" + +# This environment variable determines our operation mode. +if test \"\$libtool_install_magic\" = \"$magic\"; then + # install mode needs the following variables: + generated_by_libtool_version='$macro_version' + notinst_deplibs='$notinst_deplibs' +else + # When we are sourced in execute mode, \$file and \$ECHO are already set. + if test \"\$libtool_execute_magic\" != \"$magic\"; then + file=\"\$0\"" + + qECHO=`$ECHO "$ECHO" | $SED "$sed_quote_subst"` + $ECHO "\ + +# A function that is used when there is no print builtin or printf. +func_fallback_echo () +{ + eval 'cat <<_LTECHO_EOF +\$1 +_LTECHO_EOF' +} + ECHO=\"$qECHO\" + fi + +# Very basic option parsing. These options are (a) specific to +# the libtool wrapper, (b) are identical between the wrapper +# /script/ and the wrapper /executable/ which is used only on +# windows platforms, and (c) all begin with the string "--lt-" +# (application programs are unlikely to have options which match +# this pattern). +# +# There are only two supported options: --lt-debug and +# --lt-dump-script. There is, deliberately, no --lt-help. +# +# The first argument to this parsing function should be the +# script's $0 value, followed by "$@". +lt_option_debug= +func_parse_lt_options () +{ + lt_script_arg0=\$0 + shift + for lt_opt + do + case \"\$lt_opt\" in + --lt-debug) lt_option_debug=1 ;; + --lt-dump-script) + lt_dump_D=\`\$ECHO \"X\$lt_script_arg0\" | $SED -e 's/^X//' -e 's%/[^/]*$%%'\` + test \"X\$lt_dump_D\" = \"X\$lt_script_arg0\" && lt_dump_D=. 
+ lt_dump_F=\`\$ECHO \"X\$lt_script_arg0\" | $SED -e 's/^X//' -e 's%^.*/%%'\` + cat \"\$lt_dump_D/\$lt_dump_F\" + exit 0 + ;; + --lt-*) + \$ECHO \"Unrecognized --lt- option: '\$lt_opt'\" 1>&2 + exit 1 + ;; + esac + done + + # Print the debug banner immediately: + if test -n \"\$lt_option_debug\"; then + echo \"${outputname}:${output}:\${LINENO}: libtool wrapper (GNU $PACKAGE$TIMESTAMP) $VERSION\" 1>&2 + fi +} + +# Used when --lt-debug. Prints its arguments to stdout +# (redirection is the responsibility of the caller) +func_lt_dump_args () +{ + lt_dump_args_N=1; + for lt_arg + do + \$ECHO \"${outputname}:${output}:\${LINENO}: newargv[\$lt_dump_args_N]: \$lt_arg\" + lt_dump_args_N=\`expr \$lt_dump_args_N + 1\` + done +} + +# Core function for launching the target application +func_exec_program_core () +{ +" + case $host in + # Backslashes separate directories on plain windows + *-*-mingw | *-*-os2* | *-cegcc*) + $ECHO "\ + if test -n \"\$lt_option_debug\"; then + \$ECHO \"${outputname}:${output}:\${LINENO}: newargv[0]: \$progdir\\\\\$program\" 1>&2 + func_lt_dump_args \${1+\"\$@\"} 1>&2 + fi + exec \"\$progdir\\\\\$program\" \${1+\"\$@\"} +" + ;; + + *) + $ECHO "\ + if test -n \"\$lt_option_debug\"; then + \$ECHO \"${outputname}:${output}:\${LINENO}: newargv[0]: \$progdir/\$program\" 1>&2 + func_lt_dump_args \${1+\"\$@\"} 1>&2 + fi + exec \"\$progdir/\$program\" \${1+\"\$@\"} +" + ;; + esac + $ECHO "\ + \$ECHO \"\$0: cannot exec \$program \$*\" 1>&2 + exit 1 +} + +# A function to encapsulate launching the target application +# Strips options in the --lt-* namespace from \$@ and +# launches target application with the remaining arguments. 
+func_exec_program () +{ + case \" \$* \" in + *\\ --lt-*) + for lt_wr_arg + do + case \$lt_wr_arg in + --lt-*) ;; + *) set x \"\$@\" \"\$lt_wr_arg\"; shift;; + esac + shift + done ;; + esac + func_exec_program_core \${1+\"\$@\"} +} + + # Parse options + func_parse_lt_options \"\$0\" \${1+\"\$@\"} + + # Find the directory that this script lives in. + thisdir=\`\$ECHO \"\$file\" | $SED 's%/[^/]*$%%'\` + test \"x\$thisdir\" = \"x\$file\" && thisdir=. + + # Follow symbolic links until we get to the real thisdir. + file=\`ls -ld \"\$file\" | $SED -n 's/.*-> //p'\` + while test -n \"\$file\"; do + destdir=\`\$ECHO \"\$file\" | $SED 's%/[^/]*\$%%'\` + + # If there was a directory component, then change thisdir. + if test \"x\$destdir\" != \"x\$file\"; then + case \"\$destdir\" in + [\\\\/]* | [A-Za-z]:[\\\\/]*) thisdir=\"\$destdir\" ;; + *) thisdir=\"\$thisdir/\$destdir\" ;; + esac + fi + + file=\`\$ECHO \"\$file\" | $SED 's%^.*/%%'\` + file=\`ls -ld \"\$thisdir/\$file\" | $SED -n 's/.*-> //p'\` + done + + # Usually 'no', except on cygwin/mingw when embedded into + # the cwrapper. + WRAPPER_SCRIPT_BELONGS_IN_OBJDIR=$func_emit_wrapper_arg1 + if test \"\$WRAPPER_SCRIPT_BELONGS_IN_OBJDIR\" = \"yes\"; then + # special case for '.' + if test \"\$thisdir\" = \".\"; then + thisdir=\`pwd\` + fi + # remove .libs from thisdir + case \"\$thisdir\" in + *[\\\\/]$objdir ) thisdir=\`\$ECHO \"\$thisdir\" | $SED 's%[\\\\/][^\\\\/]*$%%'\` ;; + $objdir ) thisdir=. ;; + esac + fi + + # Try to get the absolute directory name. + absdir=\`cd \"\$thisdir\" && pwd\` + test -n \"\$absdir\" && thisdir=\"\$absdir\" +" + + if test "$fast_install" = yes; then + $ECHO "\ + program=lt-'$outputname'$exeext + progdir=\"\$thisdir/$objdir\" + + if test ! -f \"\$progdir/\$program\" || + { file=\`ls -1dt \"\$progdir/\$program\" \"\$progdir/../\$program\" 2>/dev/null | ${SED} 1q\`; \\ + test \"X\$file\" != \"X\$progdir/\$program\"; }; then + + file=\"\$\$-\$program\" + + if test ! 
-d \"\$progdir\"; then + $MKDIR \"\$progdir\" + else + $RM \"\$progdir/\$file\" + fi" + + $ECHO "\ + + # relink executable if necessary + if test -n \"\$relink_command\"; then + if relink_command_output=\`eval \$relink_command 2>&1\`; then : + else + $ECHO \"\$relink_command_output\" >&2 + $RM \"\$progdir/\$file\" + exit 1 + fi + fi + + $MV \"\$progdir/\$file\" \"\$progdir/\$program\" 2>/dev/null || + { $RM \"\$progdir/\$program\"; + $MV \"\$progdir/\$file\" \"\$progdir/\$program\"; } + $RM \"\$progdir/\$file\" + fi" + else + $ECHO "\ + program='$outputname' + progdir=\"\$thisdir/$objdir\" +" + fi + + $ECHO "\ + + if test -f \"\$progdir/\$program\"; then" + + # fixup the dll searchpath if we need to. + # + # Fix the DLL searchpath if we need to. Do this before prepending + # to shlibpath, because on Windows, both are PATH and uninstalled + # libraries must come first. + if test -n "$dllsearchpath"; then + $ECHO "\ + # Add the dll search path components to the executable PATH + PATH=$dllsearchpath:\$PATH +" + fi + + # Export our shlibpath_var if we have one. + if test "$shlibpath_overrides_runpath" = yes && test -n "$shlibpath_var" && test -n "$temp_rpath"; then + $ECHO "\ + # Add our own library path to $shlibpath_var + $shlibpath_var=\"$temp_rpath\$$shlibpath_var\" + + # Some systems cannot cope with colon-terminated $shlibpath_var + # The second colon is a workaround for a bug in BeOS R4 sed + $shlibpath_var=\`\$ECHO \"\$$shlibpath_var\" | $SED 's/::*\$//'\` + + export $shlibpath_var +" + fi + + $ECHO "\ + if test \"\$libtool_execute_magic\" != \"$magic\"; then + # Run the actual program with our arguments. + func_exec_program \${1+\"\$@\"} + fi + else + # The program doesn't exist. 
+ \$ECHO \"\$0: error: \\\`\$progdir/\$program' does not exist\" 1>&2 + \$ECHO \"This script is just a wrapper for \$program.\" 1>&2 + \$ECHO \"See the $PACKAGE documentation for more information.\" 1>&2 + exit 1 + fi +fi\ +" +} + + +# func_emit_cwrapperexe_src +# emit the source code for a wrapper executable on stdout +# Must ONLY be called from within func_mode_link because +# it depends on a number of variable set therein. +func_emit_cwrapperexe_src () +{ + cat < +#include +#ifdef _MSC_VER +# include +# include +# include +#else +# include +# include +# ifdef __CYGWIN__ +# include +# endif +#endif +#include +#include +#include +#include +#include +#include +#include +#include + +/* declarations of non-ANSI functions */ +#if defined(__MINGW32__) +# ifdef __STRICT_ANSI__ +int _putenv (const char *); +# endif +#elif defined(__CYGWIN__) +# ifdef __STRICT_ANSI__ +char *realpath (const char *, char *); +int putenv (char *); +int setenv (const char *, const char *, int); +# endif +/* #elif defined (other platforms) ... */ +#endif + +/* portability defines, excluding path handling macros */ +#if defined(_MSC_VER) +# define setmode _setmode +# define stat _stat +# define chmod _chmod +# define getcwd _getcwd +# define putenv _putenv +# define S_IXUSR _S_IEXEC +# ifndef _INTPTR_T_DEFINED +# define _INTPTR_T_DEFINED +# define intptr_t int +# endif +#elif defined(__MINGW32__) +# define setmode _setmode +# define stat _stat +# define chmod _chmod +# define getcwd _getcwd +# define putenv _putenv +#elif defined(__CYGWIN__) +# define HAVE_SETENV +# define FOPEN_WB "wb" +/* #elif defined (other platforms) ... 
*/ +#endif + +#if defined(PATH_MAX) +# define LT_PATHMAX PATH_MAX +#elif defined(MAXPATHLEN) +# define LT_PATHMAX MAXPATHLEN +#else +# define LT_PATHMAX 1024 +#endif + +#ifndef S_IXOTH +# define S_IXOTH 0 +#endif +#ifndef S_IXGRP +# define S_IXGRP 0 +#endif + +/* path handling portability macros */ +#ifndef DIR_SEPARATOR +# define DIR_SEPARATOR '/' +# define PATH_SEPARATOR ':' +#endif + +#if defined (_WIN32) || defined (__MSDOS__) || defined (__DJGPP__) || \ + defined (__OS2__) +# define HAVE_DOS_BASED_FILE_SYSTEM +# define FOPEN_WB "wb" +# ifndef DIR_SEPARATOR_2 +# define DIR_SEPARATOR_2 '\\' +# endif +# ifndef PATH_SEPARATOR_2 +# define PATH_SEPARATOR_2 ';' +# endif +#endif + +#ifndef DIR_SEPARATOR_2 +# define IS_DIR_SEPARATOR(ch) ((ch) == DIR_SEPARATOR) +#else /* DIR_SEPARATOR_2 */ +# define IS_DIR_SEPARATOR(ch) \ + (((ch) == DIR_SEPARATOR) || ((ch) == DIR_SEPARATOR_2)) +#endif /* DIR_SEPARATOR_2 */ + +#ifndef PATH_SEPARATOR_2 +# define IS_PATH_SEPARATOR(ch) ((ch) == PATH_SEPARATOR) +#else /* PATH_SEPARATOR_2 */ +# define IS_PATH_SEPARATOR(ch) ((ch) == PATH_SEPARATOR_2) +#endif /* PATH_SEPARATOR_2 */ + +#ifndef FOPEN_WB +# define FOPEN_WB "w" +#endif +#ifndef _O_BINARY +# define _O_BINARY 0 +#endif + +#define XMALLOC(type, num) ((type *) xmalloc ((num) * sizeof(type))) +#define XFREE(stale) do { \ + if (stale) { free ((void *) stale); stale = 0; } \ +} while (0) + +#if defined(LT_DEBUGWRAPPER) +static int lt_debug = 1; +#else +static int lt_debug = 0; +#endif + +const char *program_name = "libtool-wrapper"; /* in case xstrdup fails */ + +void *xmalloc (size_t num); +char *xstrdup (const char *string); +const char *base_name (const char *name); +char *find_executable (const char *wrapper); +char *chase_symlinks (const char *pathspec); +int make_executable (const char *path); +int check_executable (const char *path); +char *strendzap (char *str, const char *pat); +void lt_debugprintf (const char *file, int line, const char *fmt, ...); +void lt_fatal (const char 
*file, int line, const char *message, ...); +static const char *nonnull (const char *s); +static const char *nonempty (const char *s); +void lt_setenv (const char *name, const char *value); +char *lt_extend_str (const char *orig_value, const char *add, int to_end); +void lt_update_exe_path (const char *name, const char *value); +void lt_update_lib_path (const char *name, const char *value); +char **prepare_spawn (char **argv); +void lt_dump_script (FILE *f); +EOF + + cat <= 0) + && (st.st_mode & (S_IXUSR | S_IXGRP | S_IXOTH))) + return 1; + else + return 0; +} + +int +make_executable (const char *path) +{ + int rval = 0; + struct stat st; + + lt_debugprintf (__FILE__, __LINE__, "(make_executable): %s\n", + nonempty (path)); + if ((!path) || (!*path)) + return 0; + + if (stat (path, &st) >= 0) + { + rval = chmod (path, st.st_mode | S_IXOTH | S_IXGRP | S_IXUSR); + } + return rval; +} + +/* Searches for the full path of the wrapper. Returns + newly allocated full path name if found, NULL otherwise + Does not chase symlinks, even on platforms that support them. +*/ +char * +find_executable (const char *wrapper) +{ + int has_slash = 0; + const char *p; + const char *p_next; + /* static buffer for getcwd */ + char tmp[LT_PATHMAX + 1]; + int tmp_len; + char *concat_name; + + lt_debugprintf (__FILE__, __LINE__, "(find_executable): %s\n", + nonempty (wrapper)); + + if ((wrapper == NULL) || (*wrapper == '\0')) + return NULL; + + /* Absolute path? 
*/ +#if defined (HAVE_DOS_BASED_FILE_SYSTEM) + if (isalpha ((unsigned char) wrapper[0]) && wrapper[1] == ':') + { + concat_name = xstrdup (wrapper); + if (check_executable (concat_name)) + return concat_name; + XFREE (concat_name); + } + else + { +#endif + if (IS_DIR_SEPARATOR (wrapper[0])) + { + concat_name = xstrdup (wrapper); + if (check_executable (concat_name)) + return concat_name; + XFREE (concat_name); + } +#if defined (HAVE_DOS_BASED_FILE_SYSTEM) + } +#endif + + for (p = wrapper; *p; p++) + if (*p == '/') + { + has_slash = 1; + break; + } + if (!has_slash) + { + /* no slashes; search PATH */ + const char *path = getenv ("PATH"); + if (path != NULL) + { + for (p = path; *p; p = p_next) + { + const char *q; + size_t p_len; + for (q = p; *q; q++) + if (IS_PATH_SEPARATOR (*q)) + break; + p_len = q - p; + p_next = (*q == '\0' ? q : q + 1); + if (p_len == 0) + { + /* empty path: current directory */ + if (getcwd (tmp, LT_PATHMAX) == NULL) + lt_fatal (__FILE__, __LINE__, "getcwd failed: %s", + nonnull (strerror (errno))); + tmp_len = strlen (tmp); + concat_name = + XMALLOC (char, tmp_len + 1 + strlen (wrapper) + 1); + memcpy (concat_name, tmp, tmp_len); + concat_name[tmp_len] = '/'; + strcpy (concat_name + tmp_len + 1, wrapper); + } + else + { + concat_name = + XMALLOC (char, p_len + 1 + strlen (wrapper) + 1); + memcpy (concat_name, p, p_len); + concat_name[p_len] = '/'; + strcpy (concat_name + p_len + 1, wrapper); + } + if (check_executable (concat_name)) + return concat_name; + XFREE (concat_name); + } + } + /* not found in PATH; assume curdir */ + } + /* Relative path | not found in path: prepend cwd */ + if (getcwd (tmp, LT_PATHMAX) == NULL) + lt_fatal (__FILE__, __LINE__, "getcwd failed: %s", + nonnull (strerror (errno))); + tmp_len = strlen (tmp); + concat_name = XMALLOC (char, tmp_len + 1 + strlen (wrapper) + 1); + memcpy (concat_name, tmp, tmp_len); + concat_name[tmp_len] = '/'; + strcpy (concat_name + tmp_len + 1, wrapper); + + if (check_executable 
(concat_name)) + return concat_name; + XFREE (concat_name); + return NULL; +} + +char * +chase_symlinks (const char *pathspec) +{ +#ifndef S_ISLNK + return xstrdup (pathspec); +#else + char buf[LT_PATHMAX]; + struct stat s; + char *tmp_pathspec = xstrdup (pathspec); + char *p; + int has_symlinks = 0; + while (strlen (tmp_pathspec) && !has_symlinks) + { + lt_debugprintf (__FILE__, __LINE__, + "checking path component for symlinks: %s\n", + tmp_pathspec); + if (lstat (tmp_pathspec, &s) == 0) + { + if (S_ISLNK (s.st_mode) != 0) + { + has_symlinks = 1; + break; + } + + /* search backwards for last DIR_SEPARATOR */ + p = tmp_pathspec + strlen (tmp_pathspec) - 1; + while ((p > tmp_pathspec) && (!IS_DIR_SEPARATOR (*p))) + p--; + if ((p == tmp_pathspec) && (!IS_DIR_SEPARATOR (*p))) + { + /* no more DIR_SEPARATORS left */ + break; + } + *p = '\0'; + } + else + { + lt_fatal (__FILE__, __LINE__, + "error accessing file \"%s\": %s", + tmp_pathspec, nonnull (strerror (errno))); + } + } + XFREE (tmp_pathspec); + + if (!has_symlinks) + { + return xstrdup (pathspec); + } + + tmp_pathspec = realpath (pathspec, buf); + if (tmp_pathspec == 0) + { + lt_fatal (__FILE__, __LINE__, + "could not follow symlinks for %s", pathspec); + } + return xstrdup (tmp_pathspec); +#endif +} + +char * +strendzap (char *str, const char *pat) +{ + size_t len, patlen; + + assert (str != NULL); + assert (pat != NULL); + + len = strlen (str); + patlen = strlen (pat); + + if (patlen <= len) + { + str += len - patlen; + if (strcmp (str, pat) == 0) + *str = '\0'; + } + return str; +} + +void +lt_debugprintf (const char *file, int line, const char *fmt, ...) 
+{ + va_list args; + if (lt_debug) + { + (void) fprintf (stderr, "%s:%s:%d: ", program_name, file, line); + va_start (args, fmt); + (void) vfprintf (stderr, fmt, args); + va_end (args); + } +} + +static void +lt_error_core (int exit_status, const char *file, + int line, const char *mode, + const char *message, va_list ap) +{ + fprintf (stderr, "%s:%s:%d: %s: ", program_name, file, line, mode); + vfprintf (stderr, message, ap); + fprintf (stderr, ".\n"); + + if (exit_status >= 0) + exit (exit_status); +} + +void +lt_fatal (const char *file, int line, const char *message, ...) +{ + va_list ap; + va_start (ap, message); + lt_error_core (EXIT_FAILURE, file, line, "FATAL", message, ap); + va_end (ap); +} + +static const char * +nonnull (const char *s) +{ + return s ? s : "(null)"; +} + +static const char * +nonempty (const char *s) +{ + return (s && !*s) ? "(empty)" : nonnull (s); +} + +void +lt_setenv (const char *name, const char *value) +{ + lt_debugprintf (__FILE__, __LINE__, + "(lt_setenv) setting '%s' to '%s'\n", + nonnull (name), nonnull (value)); + { +#ifdef HAVE_SETENV + /* always make a copy, for consistency with !HAVE_SETENV */ + char *str = xstrdup (value); + setenv (name, str, 1); +#else + int len = strlen (name) + 1 + strlen (value) + 1; + char *str = XMALLOC (char, len); + sprintf (str, "%s=%s", name, value); + if (putenv (str) != EXIT_SUCCESS) + { + XFREE (str); + } +#endif + } +} + +char * +lt_extend_str (const char *orig_value, const char *add, int to_end) +{ + char *new_value; + if (orig_value && *orig_value) + { + int orig_value_len = strlen (orig_value); + int add_len = strlen (add); + new_value = XMALLOC (char, add_len + orig_value_len + 1); + if (to_end) + { + strcpy (new_value, orig_value); + strcpy (new_value + orig_value_len, add); + } + else + { + strcpy (new_value, add); + strcpy (new_value + add_len, orig_value); + } + } + else + { + new_value = xstrdup (add); + } + return new_value; +} + +void +lt_update_exe_path (const char *name, const 
char *value) +{ + lt_debugprintf (__FILE__, __LINE__, + "(lt_update_exe_path) modifying '%s' by prepending '%s'\n", + nonnull (name), nonnull (value)); + + if (name && *name && value && *value) + { + char *new_value = lt_extend_str (getenv (name), value, 0); + /* some systems can't cope with a ':'-terminated path #' */ + int len = strlen (new_value); + while (((len = strlen (new_value)) > 0) && IS_PATH_SEPARATOR (new_value[len-1])) + { + new_value[len-1] = '\0'; + } + lt_setenv (name, new_value); + XFREE (new_value); + } +} + +void +lt_update_lib_path (const char *name, const char *value) +{ + lt_debugprintf (__FILE__, __LINE__, + "(lt_update_lib_path) modifying '%s' by prepending '%s'\n", + nonnull (name), nonnull (value)); + + if (name && *name && value && *value) + { + char *new_value = lt_extend_str (getenv (name), value, 0); + lt_setenv (name, new_value); + XFREE (new_value); + } +} + +EOF + case $host_os in + mingw*) + cat <<"EOF" + +/* Prepares an argument vector before calling spawn(). + Note that spawn() does not by itself call the command interpreter + (getenv ("COMSPEC") != NULL ? getenv ("COMSPEC") : + ({ OSVERSIONINFO v; v.dwOSVersionInfoSize = sizeof(OSVERSIONINFO); + GetVersionEx(&v); + v.dwPlatformId == VER_PLATFORM_WIN32_NT; + }) ? "cmd.exe" : "command.com"). + Instead it simply concatenates the arguments, separated by ' ', and calls + CreateProcess(). We must quote the arguments since Win32 CreateProcess() + interprets characters like ' ', '\t', '\\', '"' (but not '<' and '>') in a + special way: + - Space and tab are interpreted as delimiters. They are not treated as + delimiters if they are surrounded by double quotes: "...". + - Unescaped double quotes are removed from the input. Their only effect is + that within double quotes, space and tab are treated like normal + characters. + - Backslashes not followed by double quotes are not special. 
+ - But 2*n+1 backslashes followed by a double quote become + n backslashes followed by a double quote (n >= 0): + \" -> " + \\\" -> \" + \\\\\" -> \\" + */ +#define SHELL_SPECIAL_CHARS "\"\\ \001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037" +#define SHELL_SPACE_CHARS " \001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037" +char ** +prepare_spawn (char **argv) +{ + size_t argc; + char **new_argv; + size_t i; + + /* Count number of arguments. */ + for (argc = 0; argv[argc] != NULL; argc++) + ; + + /* Allocate new argument vector. */ + new_argv = XMALLOC (char *, argc + 1); + + /* Put quoted arguments into the new argument vector. */ + for (i = 0; i < argc; i++) + { + const char *string = argv[i]; + + if (string[0] == '\0') + new_argv[i] = xstrdup ("\"\""); + else if (strpbrk (string, SHELL_SPECIAL_CHARS) != NULL) + { + int quote_around = (strpbrk (string, SHELL_SPACE_CHARS) != NULL); + size_t length; + unsigned int backslashes; + const char *s; + char *quoted_string; + char *p; + + length = 0; + backslashes = 0; + if (quote_around) + length++; + for (s = string; *s != '\0'; s++) + { + char c = *s; + if (c == '"') + length += backslashes + 1; + length++; + if (c == '\\') + backslashes++; + else + backslashes = 0; + } + if (quote_around) + length += backslashes + 1; + + quoted_string = XMALLOC (char, length + 1); + + p = quoted_string; + backslashes = 0; + if (quote_around) + *p++ = '"'; + for (s = string; *s != '\0'; s++) + { + char c = *s; + if (c == '"') + { + unsigned int j; + for (j = backslashes + 1; j > 0; j--) + *p++ = '\\'; + } + *p++ = c; + if (c == '\\') + backslashes++; + else + backslashes = 0; + } + if (quote_around) + { + unsigned int j; + for (j = backslashes; j > 0; j--) + *p++ = '\\'; + *p++ = '"'; + } + *p = '\0'; + + new_argv[i] = quoted_string; + } + else + new_argv[i] = (char *) string; + } + 
new_argv[argc] = NULL; + + return new_argv; +} +EOF + ;; + esac + + cat <<"EOF" +void lt_dump_script (FILE* f) +{ +EOF + func_emit_wrapper yes | + $SED -n -e ' +s/^\(.\{79\}\)\(..*\)/\1\ +\2/ +h +s/\([\\"]\)/\\\1/g +s/$/\\n/ +s/\([^\n]*\).*/ fputs ("\1", f);/p +g +D' + cat <<"EOF" +} +EOF +} +# end: func_emit_cwrapperexe_src + +# func_win32_import_lib_p ARG +# True if ARG is an import lib, as indicated by $file_magic_cmd +func_win32_import_lib_p () +{ + $opt_debug + case `eval $file_magic_cmd \"\$1\" 2>/dev/null | $SED -e 10q` in + *import*) : ;; + *) false ;; + esac +} + +# func_mode_link arg... +func_mode_link () +{ + $opt_debug + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*) + # It is impossible to link a dll without this setting, and + # we shouldn't force the makefile maintainer to figure out + # which system we are compiling for in order to pass an extra + # flag for every libtool invocation. + # allow_undefined=no + + # FIXME: Unfortunately, there are problems with the above when trying + # to make a dll which has undefined symbols, in which case not + # even a static library is built. For now, we need to specify + # -no-undefined on the libtool link line when we can be certain + # that all symbols are satisfied, otherwise we get a static library. 
+ allow_undefined=yes + ;; + *) + allow_undefined=yes + ;; + esac + libtool_args=$nonopt + base_compile="$nonopt $@" + compile_command=$nonopt + finalize_command=$nonopt + + compile_rpath= + finalize_rpath= + compile_shlibpath= + finalize_shlibpath= + convenience= + old_convenience= + deplibs= + old_deplibs= + compiler_flags= + linker_flags= + dllsearchpath= + lib_search_path=`pwd` + inst_prefix_dir= + new_inherited_linker_flags= + + avoid_version=no + bindir= + dlfiles= + dlprefiles= + dlself=no + export_dynamic=no + export_symbols= + export_symbols_regex= + generated= + libobjs= + ltlibs= + module=no + no_install=no + objs= + non_pic_objects= + precious_files_regex= + prefer_static_libs=no + preload=no + prev= + prevarg= + release= + rpath= + xrpath= + perm_rpath= + temp_rpath= + thread_safe=no + vinfo= + vinfo_number=no + weak_libs= + single_module="${wl}-single_module" + func_infer_tag $base_compile + + # We need to know -static, to get the right output filenames. + for arg + do + case $arg in + -shared) + test "$build_libtool_libs" != yes && \ + func_fatal_configuration "can not build a shared library" + build_old_libs=no + break + ;; + -all-static | -static | -static-libtool-libs) + case $arg in + -all-static) + if test "$build_libtool_libs" = yes && test -z "$link_static_flag"; then + func_warning "complete static linking is impossible in this configuration" + fi + if test -n "$link_static_flag"; then + dlopen_self=$dlopen_self_static + fi + prefer_static_libs=yes + ;; + -static) + if test -z "$pic_flag" && test -n "$link_static_flag"; then + dlopen_self=$dlopen_self_static + fi + prefer_static_libs=built + ;; + -static-libtool-libs) + if test -z "$pic_flag" && test -n "$link_static_flag"; then + dlopen_self=$dlopen_self_static + fi + prefer_static_libs=yes + ;; + esac + build_libtool_libs=no + build_old_libs=yes + break + ;; + esac + done + + # See if our shared archives depend on static archives. 
+ test -n "$old_archive_from_new_cmds" && build_old_libs=yes + + # Go through the arguments, transforming them on the way. + while test "$#" -gt 0; do + arg="$1" + shift + func_quote_for_eval "$arg" + qarg=$func_quote_for_eval_unquoted_result + func_append libtool_args " $func_quote_for_eval_result" + + # If the previous option needs an argument, assign it. + if test -n "$prev"; then + case $prev in + output) + func_append compile_command " @OUTPUT@" + func_append finalize_command " @OUTPUT@" + ;; + esac + + case $prev in + bindir) + bindir="$arg" + prev= + continue + ;; + dlfiles|dlprefiles) + if test "$preload" = no; then + # Add the symbol object into the linking commands. + func_append compile_command " @SYMFILE@" + func_append finalize_command " @SYMFILE@" + preload=yes + fi + case $arg in + *.la | *.lo) ;; # We handle these cases below. + force) + if test "$dlself" = no; then + dlself=needless + export_dynamic=yes + fi + prev= + continue + ;; + self) + if test "$prev" = dlprefiles; then + dlself=yes + elif test "$prev" = dlfiles && test "$dlopen_self" != yes; then + dlself=yes + else + dlself=needless + export_dynamic=yes + fi + prev= + continue + ;; + *) + if test "$prev" = dlfiles; then + func_append dlfiles " $arg" + else + func_append dlprefiles " $arg" + fi + prev= + continue + ;; + esac + ;; + expsyms) + export_symbols="$arg" + test -f "$arg" \ + || func_fatal_error "symbol file \`$arg' does not exist" + prev= + continue + ;; + expsyms_regex) + export_symbols_regex="$arg" + prev= + continue + ;; + framework) + case $host in + *-*-darwin*) + case "$deplibs " in + *" $qarg.ltframework "*) ;; + *) func_append deplibs " $qarg.ltframework" # this is fixed later + ;; + esac + ;; + esac + prev= + continue + ;; + inst_prefix) + inst_prefix_dir="$arg" + prev= + continue + ;; + objectlist) + if test -f "$arg"; then + save_arg=$arg + moreargs= + for fil in `cat "$save_arg"` + do +# func_append moreargs " $fil" + arg=$fil + # A libtool-controlled object. 
+ + # Check to see that this really is a libtool object. + if func_lalib_unsafe_p "$arg"; then + pic_object= + non_pic_object= + + # Read the .lo file + func_source "$arg" + + if test -z "$pic_object" || + test -z "$non_pic_object" || + test "$pic_object" = none && + test "$non_pic_object" = none; then + func_fatal_error "cannot find name of object for \`$arg'" + fi + + # Extract subdirectory from the argument. + func_dirname "$arg" "/" "" + xdir="$func_dirname_result" + + if test "$pic_object" != none; then + # Prepend the subdirectory the object is found in. + pic_object="$xdir$pic_object" + + if test "$prev" = dlfiles; then + if test "$build_libtool_libs" = yes && test "$dlopen_support" = yes; then + func_append dlfiles " $pic_object" + prev= + continue + else + # If libtool objects are unsupported, then we need to preload. + prev=dlprefiles + fi + fi + + # CHECK ME: I think I busted this. -Ossama + if test "$prev" = dlprefiles; then + # Preload the old-style object. + func_append dlprefiles " $pic_object" + prev= + fi + + # A PIC object. + func_append libobjs " $pic_object" + arg="$pic_object" + fi + + # Non-PIC object. + if test "$non_pic_object" != none; then + # Prepend the subdirectory the object is found in. + non_pic_object="$xdir$non_pic_object" + + # A standard non-PIC object + func_append non_pic_objects " $non_pic_object" + if test -z "$pic_object" || test "$pic_object" = none ; then + arg="$non_pic_object" + fi + else + # If the PIC object exists, use it instead. + # $xdir was prepended to $pic_object above. + non_pic_object="$pic_object" + func_append non_pic_objects " $non_pic_object" + fi + else + # Only an error if not doing a dry-run. + if $opt_dry_run; then + # Extract subdirectory from the argument. 
+ func_dirname "$arg" "/" "" + xdir="$func_dirname_result" + + func_lo2o "$arg" + pic_object=$xdir$objdir/$func_lo2o_result + non_pic_object=$xdir$func_lo2o_result + func_append libobjs " $pic_object" + func_append non_pic_objects " $non_pic_object" + else + func_fatal_error "\`$arg' is not a valid libtool object" + fi + fi + done + else + func_fatal_error "link input file \`$arg' does not exist" + fi + arg=$save_arg + prev= + continue + ;; + precious_regex) + precious_files_regex="$arg" + prev= + continue + ;; + release) + release="-$arg" + prev= + continue + ;; + rpath | xrpath) + # We need an absolute path. + case $arg in + [\\/]* | [A-Za-z]:[\\/]*) ;; + *) + func_fatal_error "only absolute run-paths are allowed" + ;; + esac + if test "$prev" = rpath; then + case "$rpath " in + *" $arg "*) ;; + *) func_append rpath " $arg" ;; + esac + else + case "$xrpath " in + *" $arg "*) ;; + *) func_append xrpath " $arg" ;; + esac + fi + prev= + continue + ;; + shrext) + shrext_cmds="$arg" + prev= + continue + ;; + weak) + func_append weak_libs " $arg" + prev= + continue + ;; + xcclinker) + func_append linker_flags " $qarg" + func_append compiler_flags " $qarg" + prev= + func_append compile_command " $qarg" + func_append finalize_command " $qarg" + continue + ;; + xcompiler) + func_append compiler_flags " $qarg" + prev= + func_append compile_command " $qarg" + func_append finalize_command " $qarg" + continue + ;; + xlinker) + func_append linker_flags " $qarg" + func_append compiler_flags " $wl$qarg" + prev= + func_append compile_command " $wl$qarg" + func_append finalize_command " $wl$qarg" + continue + ;; + *) + eval "$prev=\"\$arg\"" + prev= + continue + ;; + esac + fi # test -n "$prev" + + prevarg="$arg" + + case $arg in + -all-static) + if test -n "$link_static_flag"; then + # See comment for -static flag below, for more details. 
+ func_append compile_command " $link_static_flag" + func_append finalize_command " $link_static_flag" + fi + continue + ;; + + -allow-undefined) + # FIXME: remove this flag sometime in the future. + func_fatal_error "\`-allow-undefined' must not be used because it is the default" + ;; + + -avoid-version) + avoid_version=yes + continue + ;; + + -bindir) + prev=bindir + continue + ;; + + -dlopen) + prev=dlfiles + continue + ;; + + -dlpreopen) + prev=dlprefiles + continue + ;; + + -export-dynamic) + export_dynamic=yes + continue + ;; + + -export-symbols | -export-symbols-regex) + if test -n "$export_symbols" || test -n "$export_symbols_regex"; then + func_fatal_error "more than one -exported-symbols argument is not allowed" + fi + if test "X$arg" = "X-export-symbols"; then + prev=expsyms + else + prev=expsyms_regex + fi + continue + ;; + + -framework) + prev=framework + continue + ;; + + -inst-prefix-dir) + prev=inst_prefix + continue + ;; + + # The native IRIX linker understands -LANG:*, -LIST:* and -LNO:* + # so, if we see these flags be careful not to treat them like -L + -L[A-Z][A-Z]*:*) + case $with_gcc/$host in + no/*-*-irix* | /*-*-irix*) + func_append compile_command " $arg" + func_append finalize_command " $arg" + ;; + esac + continue + ;; + + -L*) + func_stripname "-L" '' "$arg" + if test -z "$func_stripname_result"; then + if test "$#" -gt 0; then + func_fatal_error "require no space between \`-L' and \`$1'" + else + func_fatal_error "need path for \`-L' option" + fi + fi + func_resolve_sysroot "$func_stripname_result" + dir=$func_resolve_sysroot_result + # We need an absolute path. 
+ case $dir in + [\\/]* | [A-Za-z]:[\\/]*) ;; + *) + absdir=`cd "$dir" && pwd` + test -z "$absdir" && \ + func_fatal_error "cannot determine absolute directory name of \`$dir'" + dir="$absdir" + ;; + esac + case "$deplibs " in + *" -L$dir "* | *" $arg "*) + # Will only happen for absolute or sysroot arguments + ;; + *) + # Preserve sysroot, but never include relative directories + case $dir in + [\\/]* | [A-Za-z]:[\\/]* | =*) func_append deplibs " $arg" ;; + *) func_append deplibs " -L$dir" ;; + esac + func_append lib_search_path " $dir" + ;; + esac + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*) + testbindir=`$ECHO "$dir" | $SED 's*/lib$*/bin*'` + case :$dllsearchpath: in + *":$dir:"*) ;; + ::) dllsearchpath=$dir;; + *) func_append dllsearchpath ":$dir";; + esac + case :$dllsearchpath: in + *":$testbindir:"*) ;; + ::) dllsearchpath=$testbindir;; + *) func_append dllsearchpath ":$testbindir";; + esac + ;; + esac + continue + ;; + + -l*) + if test "X$arg" = "X-lc" || test "X$arg" = "X-lm"; then + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-beos* | *-cegcc* | *-*-haiku*) + # These systems don't actually have a C or math library (as such) + continue + ;; + *-*-os2*) + # These systems don't actually have a C library (as such) + test "X$arg" = "X-lc" && continue + ;; + *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*) + # Do not include libc due to us having libc/libc_r. 
+ test "X$arg" = "X-lc" && continue + ;; + *-*-rhapsody* | *-*-darwin1.[012]) + # Rhapsody C and math libraries are in the System framework + func_append deplibs " System.ltframework" + continue + ;; + *-*-sco3.2v5* | *-*-sco5v6*) + # Causes problems with __ctype + test "X$arg" = "X-lc" && continue + ;; + *-*-sysv4.2uw2* | *-*-sysv5* | *-*-unixware* | *-*-OpenUNIX*) + # Compiler inserts libc in the correct place for threads to work + test "X$arg" = "X-lc" && continue + ;; + esac + elif test "X$arg" = "X-lc_r"; then + case $host in + *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*) + # Do not include libc_r directly, use -pthread flag. + continue + ;; + esac + fi + func_append deplibs " $arg" + continue + ;; + + -module) + module=yes + continue + ;; + + # Tru64 UNIX uses -model [arg] to determine the layout of C++ + # classes, name mangling, and exception handling. + # Darwin uses the -arch flag to determine output architecture. + -model|-arch|-isysroot|--sysroot) + func_append compiler_flags " $arg" + func_append compile_command " $arg" + func_append finalize_command " $arg" + prev=xcompiler + continue + ;; + + -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe \ + |-threads|-fopenmp|-openmp|-mp|-xopenmp|-omp|-qsmp=*) + func_append compiler_flags " $arg" + func_append compile_command " $arg" + func_append finalize_command " $arg" + case "$new_inherited_linker_flags " in + *" $arg "*) ;; + * ) func_append new_inherited_linker_flags " $arg" ;; + esac + continue + ;; + + -multi_module) + single_module="${wl}-multi_module" + continue + ;; + + -no-fast-install) + fast_install=no + continue + ;; + + -no-install) + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-darwin* | *-cegcc*) + # The PATH hackery in wrapper scripts is required on Windows + # and Darwin in order for the loader to find any dlls it needs. 
+ func_warning "\`-no-install' is ignored for $host" + func_warning "assuming \`-no-fast-install' instead" + fast_install=no + ;; + *) no_install=yes ;; + esac + continue + ;; + + -no-undefined) + allow_undefined=no + continue + ;; + + -objectlist) + prev=objectlist + continue + ;; + + -o) prev=output ;; + + -precious-files-regex) + prev=precious_regex + continue + ;; + + -release) + prev=release + continue + ;; + + -rpath) + prev=rpath + continue + ;; + + -R) + prev=xrpath + continue + ;; + + -R*) + func_stripname '-R' '' "$arg" + dir=$func_stripname_result + # We need an absolute path. + case $dir in + [\\/]* | [A-Za-z]:[\\/]*) ;; + =*) + func_stripname '=' '' "$dir" + dir=$lt_sysroot$func_stripname_result + ;; + *) + func_fatal_error "only absolute run-paths are allowed" + ;; + esac + case "$xrpath " in + *" $dir "*) ;; + *) func_append xrpath " $dir" ;; + esac + continue + ;; + + -shared) + # The effects of -shared are defined in a previous loop. + continue + ;; + + -shrext) + prev=shrext + continue + ;; + + -static | -static-libtool-libs) + # The effects of -static are defined in a previous loop. + # We used to do the same as -all-static on platforms that + # didn't have a PIC flag, but the assumption that the effects + # would be equivalent was wrong. It would break on at least + # Digital Unix and AIX. 
+ continue + ;; + + -thread-safe) + thread_safe=yes + continue + ;; + + -version-info) + prev=vinfo + continue + ;; + + -version-number) + prev=vinfo + vinfo_number=yes + continue + ;; + + -weak) + prev=weak + continue + ;; + + -Wc,*) + func_stripname '-Wc,' '' "$arg" + args=$func_stripname_result + arg= + save_ifs="$IFS"; IFS=',' + for flag in $args; do + IFS="$save_ifs" + func_quote_for_eval "$flag" + func_append arg " $func_quote_for_eval_result" + func_append compiler_flags " $func_quote_for_eval_result" + done + IFS="$save_ifs" + func_stripname ' ' '' "$arg" + arg=$func_stripname_result + ;; + + -Wl,*) + func_stripname '-Wl,' '' "$arg" + args=$func_stripname_result + arg= + save_ifs="$IFS"; IFS=',' + for flag in $args; do + IFS="$save_ifs" + func_quote_for_eval "$flag" + func_append arg " $wl$func_quote_for_eval_result" + func_append compiler_flags " $wl$func_quote_for_eval_result" + func_append linker_flags " $func_quote_for_eval_result" + done + IFS="$save_ifs" + func_stripname ' ' '' "$arg" + arg=$func_stripname_result + ;; + + -Xcompiler) + prev=xcompiler + continue + ;; + + -Xlinker) + prev=xlinker + continue + ;; + + -XCClinker) + prev=xcclinker + continue + ;; + + # -msg_* for osf cc + -msg_*) + func_quote_for_eval "$arg" + arg="$func_quote_for_eval_result" + ;; + + # Flags to be passed through unchanged, with rationale: + # -64, -mips[0-9] enable 64-bit mode for the SGI compiler + # -r[0-9][0-9]* specify processor for the SGI compiler + # -xarch=*, -xtarget=* enable 64-bit mode for the Sun compiler + # +DA*, +DD* enable 64-bit mode for the HP compiler + # -q* compiler args for the IBM compiler + # -m*, -t[45]*, -txscale* architecture-specific flags for GCC + # -F/path path to uninstalled frameworks, gcc on darwin + # -p, -pg, --coverage, -fprofile-* profiling flags for GCC + # @file GCC response files + # -tp=* Portland pgcc target processor selection + # --sysroot=* for sysroot support + # -O*, -flto*, -fwhopr*, -fuse-linker-plugin GCC link-time 
optimization + -64|-mips[0-9]|-r[0-9][0-9]*|-xarch=*|-xtarget=*|+DA*|+DD*|-q*|-m*| \ + -t[45]*|-txscale*|-p|-pg|--coverage|-fprofile-*|-F*|@*|-tp=*|--sysroot=*| \ + -O*|-flto*|-fwhopr*|-fuse-linker-plugin) + func_quote_for_eval "$arg" + arg="$func_quote_for_eval_result" + func_append compile_command " $arg" + func_append finalize_command " $arg" + func_append compiler_flags " $arg" + continue + ;; + + # Some other compiler flag. + -* | +*) + func_quote_for_eval "$arg" + arg="$func_quote_for_eval_result" + ;; + + *.$objext) + # A standard object. + func_append objs " $arg" + ;; + + *.lo) + # A libtool-controlled object. + + # Check to see that this really is a libtool object. + if func_lalib_unsafe_p "$arg"; then + pic_object= + non_pic_object= + + # Read the .lo file + func_source "$arg" + + if test -z "$pic_object" || + test -z "$non_pic_object" || + test "$pic_object" = none && + test "$non_pic_object" = none; then + func_fatal_error "cannot find name of object for \`$arg'" + fi + + # Extract subdirectory from the argument. + func_dirname "$arg" "/" "" + xdir="$func_dirname_result" + + if test "$pic_object" != none; then + # Prepend the subdirectory the object is found in. + pic_object="$xdir$pic_object" + + if test "$prev" = dlfiles; then + if test "$build_libtool_libs" = yes && test "$dlopen_support" = yes; then + func_append dlfiles " $pic_object" + prev= + continue + else + # If libtool objects are unsupported, then we need to preload. + prev=dlprefiles + fi + fi + + # CHECK ME: I think I busted this. -Ossama + if test "$prev" = dlprefiles; then + # Preload the old-style object. + func_append dlprefiles " $pic_object" + prev= + fi + + # A PIC object. + func_append libobjs " $pic_object" + arg="$pic_object" + fi + + # Non-PIC object. + if test "$non_pic_object" != none; then + # Prepend the subdirectory the object is found in. 
+ non_pic_object="$xdir$non_pic_object" + + # A standard non-PIC object + func_append non_pic_objects " $non_pic_object" + if test -z "$pic_object" || test "$pic_object" = none ; then + arg="$non_pic_object" + fi + else + # If the PIC object exists, use it instead. + # $xdir was prepended to $pic_object above. + non_pic_object="$pic_object" + func_append non_pic_objects " $non_pic_object" + fi + else + # Only an error if not doing a dry-run. + if $opt_dry_run; then + # Extract subdirectory from the argument. + func_dirname "$arg" "/" "" + xdir="$func_dirname_result" + + func_lo2o "$arg" + pic_object=$xdir$objdir/$func_lo2o_result + non_pic_object=$xdir$func_lo2o_result + func_append libobjs " $pic_object" + func_append non_pic_objects " $non_pic_object" + else + func_fatal_error "\`$arg' is not a valid libtool object" + fi + fi + ;; + + *.$libext) + # An archive. + func_append deplibs " $arg" + func_append old_deplibs " $arg" + continue + ;; + + *.la) + # A libtool-controlled library. + + func_resolve_sysroot "$arg" + if test "$prev" = dlfiles; then + # This library was specified with -dlopen. + func_append dlfiles " $func_resolve_sysroot_result" + prev= + elif test "$prev" = dlprefiles; then + # The library was specified with -dlpreopen. + func_append dlprefiles " $func_resolve_sysroot_result" + prev= + else + func_append deplibs " $func_resolve_sysroot_result" + fi + continue + ;; + + # Some other compiler argument. + *) + # Unknown arguments in both finalize_command and compile_command need + # to be aesthetically quoted because they are evaled later. + func_quote_for_eval "$arg" + arg="$func_quote_for_eval_result" + ;; + esac # arg + + # Now actually substitute the argument into the commands. 
+ if test -n "$arg"; then + func_append compile_command " $arg" + func_append finalize_command " $arg" + fi + done # argument parsing loop + + test -n "$prev" && \ + func_fatal_help "the \`$prevarg' option requires an argument" + + if test "$export_dynamic" = yes && test -n "$export_dynamic_flag_spec"; then + eval arg=\"$export_dynamic_flag_spec\" + func_append compile_command " $arg" + func_append finalize_command " $arg" + fi + + oldlibs= + # calculate the name of the file, without its directory + func_basename "$output" + outputname="$func_basename_result" + libobjs_save="$libobjs" + + if test -n "$shlibpath_var"; then + # get the directories listed in $shlibpath_var + eval shlib_search_path=\`\$ECHO \"\${$shlibpath_var}\" \| \$SED \'s/:/ /g\'\` + else + shlib_search_path= + fi + eval sys_lib_search_path=\"$sys_lib_search_path_spec\" + eval sys_lib_dlsearch_path=\"$sys_lib_dlsearch_path_spec\" + + func_dirname "$output" "/" "" + output_objdir="$func_dirname_result$objdir" + func_to_tool_file "$output_objdir/" + tool_output_objdir=$func_to_tool_file_result + # Create the object directory. + func_mkdir_p "$output_objdir" + + # Determine the type of output + case $output in + "") + func_fatal_help "you must specify an output file" + ;; + *.$libext) linkmode=oldlib ;; + *.lo | *.$objext) linkmode=obj ;; + *.la) linkmode=lib ;; + *) linkmode=prog ;; # Anything else should be a program. + esac + + specialdeplibs= + + libs= + # Find all interdependent deplibs by searching for libraries + # that are linked more than once (e.g. 
-la -lb -la) + for deplib in $deplibs; do + if $opt_preserve_dup_deps ; then + case "$libs " in + *" $deplib "*) func_append specialdeplibs " $deplib" ;; + esac + fi + func_append libs " $deplib" + done + + if test "$linkmode" = lib; then + libs="$predeps $libs $compiler_lib_search_path $postdeps" + + # Compute libraries that are listed more than once in $predeps + # $postdeps and mark them as special (i.e., whose duplicates are + # not to be eliminated). + pre_post_deps= + if $opt_duplicate_compiler_generated_deps; then + for pre_post_dep in $predeps $postdeps; do + case "$pre_post_deps " in + *" $pre_post_dep "*) func_append specialdeplibs " $pre_post_deps" ;; + esac + func_append pre_post_deps " $pre_post_dep" + done + fi + pre_post_deps= + fi + + deplibs= + newdependency_libs= + newlib_search_path= + need_relink=no # whether we're linking any uninstalled libtool libraries + notinst_deplibs= # not-installed libtool libraries + notinst_path= # paths that contain not-installed libtool libraries + + case $linkmode in + lib) + passes="conv dlpreopen link" + for file in $dlfiles $dlprefiles; do + case $file in + *.la) ;; + *) + func_fatal_help "libraries can \`-dlopen' only libtool libraries: $file" + ;; + esac + done + ;; + prog) + compile_deplibs= + finalize_deplibs= + alldeplibs=no + newdlfiles= + newdlprefiles= + passes="conv scan dlopen dlpreopen link" + ;; + *) passes="conv" + ;; + esac + + for pass in $passes; do + # The preopen pass in lib mode reverses $deplibs; put it back here + # so that -L comes before libs that need it for instance... 
+ if test "$linkmode,$pass" = "lib,link"; then + ## FIXME: Find the place where the list is rebuilt in the wrong + ## order, and fix it there properly + tmp_deplibs= + for deplib in $deplibs; do + tmp_deplibs="$deplib $tmp_deplibs" + done + deplibs="$tmp_deplibs" + fi + + if test "$linkmode,$pass" = "lib,link" || + test "$linkmode,$pass" = "prog,scan"; then + libs="$deplibs" + deplibs= + fi + if test "$linkmode" = prog; then + case $pass in + dlopen) libs="$dlfiles" ;; + dlpreopen) libs="$dlprefiles" ;; + link) libs="$deplibs %DEPLIBS% $dependency_libs" ;; + esac + fi + if test "$linkmode,$pass" = "lib,dlpreopen"; then + # Collect and forward deplibs of preopened libtool libs + for lib in $dlprefiles; do + # Ignore non-libtool-libs + dependency_libs= + func_resolve_sysroot "$lib" + case $lib in + *.la) func_source "$func_resolve_sysroot_result" ;; + esac + + # Collect preopened libtool deplibs, except any this library + # has declared as weak libs + for deplib in $dependency_libs; do + func_basename "$deplib" + deplib_base=$func_basename_result + case " $weak_libs " in + *" $deplib_base "*) ;; + *) func_append deplibs " $deplib" ;; + esac + done + done + libs="$dlprefiles" + fi + if test "$pass" = dlopen; then + # Collect dlpreopened libraries + save_deplibs="$deplibs" + deplibs= + fi + + for deplib in $libs; do + lib= + found=no + case $deplib in + -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe \ + |-threads|-fopenmp|-openmp|-mp|-xopenmp|-omp|-qsmp=*) + if test "$linkmode,$pass" = "prog,link"; then + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + func_append compiler_flags " $deplib" + if test "$linkmode" = lib ; then + case "$new_inherited_linker_flags " in + *" $deplib "*) ;; + * ) func_append new_inherited_linker_flags " $deplib" ;; + esac + fi + fi + continue + ;; + -l*) + if test "$linkmode" != lib && test "$linkmode" != prog; then + func_warning "\`-l' is ignored for archives/objects" + 
continue + fi + func_stripname '-l' '' "$deplib" + name=$func_stripname_result + if test "$linkmode" = lib; then + searchdirs="$newlib_search_path $lib_search_path $compiler_lib_search_dirs $sys_lib_search_path $shlib_search_path" + else + searchdirs="$newlib_search_path $lib_search_path $sys_lib_search_path $shlib_search_path" + fi + for searchdir in $searchdirs; do + for search_ext in .la $std_shrext .so .a; do + # Search the libtool library + lib="$searchdir/lib${name}${search_ext}" + if test -f "$lib"; then + if test "$search_ext" = ".la"; then + found=yes + else + found=no + fi + break 2 + fi + done + done + if test "$found" != yes; then + # deplib doesn't seem to be a libtool library + if test "$linkmode,$pass" = "prog,link"; then + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + deplibs="$deplib $deplibs" + test "$linkmode" = lib && newdependency_libs="$deplib $newdependency_libs" + fi + continue + else # deplib is a libtool library + # If $allow_libtool_libs_with_static_runtimes && $deplib is a stdlib, + # We need to do some special things here, and not later. + if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then + case " $predeps $postdeps " in + *" $deplib "*) + if func_lalib_p "$lib"; then + library_names= + old_library= + func_source "$lib" + for l in $old_library $library_names; do + ll="$l" + done + if test "X$ll" = "X$old_library" ; then # only static version available + found=no + func_dirname "$lib" "" "." 
+ ladir="$func_dirname_result" + lib=$ladir/$old_library + if test "$linkmode,$pass" = "prog,link"; then + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + deplibs="$deplib $deplibs" + test "$linkmode" = lib && newdependency_libs="$deplib $newdependency_libs" + fi + continue + fi + fi + ;; + *) ;; + esac + fi + fi + ;; # -l + *.ltframework) + if test "$linkmode,$pass" = "prog,link"; then + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + deplibs="$deplib $deplibs" + if test "$linkmode" = lib ; then + case "$new_inherited_linker_flags " in + *" $deplib "*) ;; + * ) func_append new_inherited_linker_flags " $deplib" ;; + esac + fi + fi + continue + ;; + -L*) + case $linkmode in + lib) + deplibs="$deplib $deplibs" + test "$pass" = conv && continue + newdependency_libs="$deplib $newdependency_libs" + func_stripname '-L' '' "$deplib" + func_resolve_sysroot "$func_stripname_result" + func_append newlib_search_path " $func_resolve_sysroot_result" + ;; + prog) + if test "$pass" = conv; then + deplibs="$deplib $deplibs" + continue + fi + if test "$pass" = scan; then + deplibs="$deplib $deplibs" + else + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + fi + func_stripname '-L' '' "$deplib" + func_resolve_sysroot "$func_stripname_result" + func_append newlib_search_path " $func_resolve_sysroot_result" + ;; + *) + func_warning "\`-L' is ignored for archives/objects" + ;; + esac # linkmode + continue + ;; # -L + -R*) + if test "$pass" = link; then + func_stripname '-R' '' "$deplib" + func_resolve_sysroot "$func_stripname_result" + dir=$func_resolve_sysroot_result + # Make sure the xrpath contains only unique directories. 
+ case "$xrpath " in + *" $dir "*) ;; + *) func_append xrpath " $dir" ;; + esac + fi + deplibs="$deplib $deplibs" + continue + ;; + *.la) + func_resolve_sysroot "$deplib" + lib=$func_resolve_sysroot_result + ;; + *.$libext) + if test "$pass" = conv; then + deplibs="$deplib $deplibs" + continue + fi + case $linkmode in + lib) + # Linking convenience modules into shared libraries is allowed, + # but linking other static libraries is non-portable. + case " $dlpreconveniencelibs " in + *" $deplib "*) ;; + *) + valid_a_lib=no + case $deplibs_check_method in + match_pattern*) + set dummy $deplibs_check_method; shift + match_pattern_regex=`expr "$deplibs_check_method" : "$1 \(.*\)"` + if eval "\$ECHO \"$deplib\"" 2>/dev/null | $SED 10q \ + | $EGREP "$match_pattern_regex" > /dev/null; then + valid_a_lib=yes + fi + ;; + pass_all) + valid_a_lib=yes + ;; + esac + if test "$valid_a_lib" != yes; then + echo + $ECHO "*** Warning: Trying to link with static lib archive $deplib." + echo "*** I have the capability to make that library automatically link in when" + echo "*** you link to this library. But I can only do this if you have a" + echo "*** shared version of the library, which you do not appear to have" + echo "*** because the file extensions .$libext of this argument makes me believe" + echo "*** that it is just a static archive that I should not use here." + else + echo + $ECHO "*** Warning: Linking the shared library $output against the" + $ECHO "*** static library $deplib is not portable!" 
+ deplibs="$deplib $deplibs" + fi + ;; + esac + continue + ;; + prog) + if test "$pass" != link; then + deplibs="$deplib $deplibs" + else + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + fi + continue + ;; + esac # linkmode + ;; # *.$libext + *.lo | *.$objext) + if test "$pass" = conv; then + deplibs="$deplib $deplibs" + elif test "$linkmode" = prog; then + if test "$pass" = dlpreopen || test "$dlopen_support" != yes || test "$build_libtool_libs" = no; then + # If there is no dlopen support or we're linking statically, + # we need to preload. + func_append newdlprefiles " $deplib" + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + func_append newdlfiles " $deplib" + fi + fi + continue + ;; + %DEPLIBS%) + alldeplibs=yes + continue + ;; + esac # case $deplib + + if test "$found" = yes || test -f "$lib"; then : + else + func_fatal_error "cannot find the library \`$lib' or unhandled argument \`$deplib'" + fi + + # Check to see that this really is a libtool archive. + func_lalib_unsafe_p "$lib" \ + || func_fatal_error "\`$lib' is not a valid libtool archive" + + func_dirname "$lib" "" "." 
+ ladir="$func_dirname_result" + + dlname= + dlopen= + dlpreopen= + libdir= + library_names= + old_library= + inherited_linker_flags= + # If the library was installed with an old release of libtool, + # it will not redefine variables installed, or shouldnotlink + installed=yes + shouldnotlink=no + avoidtemprpath= + + + # Read the .la file + func_source "$lib" + + # Convert "-framework foo" to "foo.ltframework" + if test -n "$inherited_linker_flags"; then + tmp_inherited_linker_flags=`$ECHO "$inherited_linker_flags" | $SED 's/-framework \([^ $]*\)/\1.ltframework/g'` + for tmp_inherited_linker_flag in $tmp_inherited_linker_flags; do + case " $new_inherited_linker_flags " in + *" $tmp_inherited_linker_flag "*) ;; + *) func_append new_inherited_linker_flags " $tmp_inherited_linker_flag";; + esac + done + fi + dependency_libs=`$ECHO " $dependency_libs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + if test "$linkmode,$pass" = "lib,link" || + test "$linkmode,$pass" = "prog,scan" || + { test "$linkmode" != prog && test "$linkmode" != lib; }; then + test -n "$dlopen" && func_append dlfiles " $dlopen" + test -n "$dlpreopen" && func_append dlprefiles " $dlpreopen" + fi + + if test "$pass" = conv; then + # Only check for convenience libraries + deplibs="$lib $deplibs" + if test -z "$libdir"; then + if test -z "$old_library"; then + func_fatal_error "cannot find name of link library for \`$lib'" + fi + # It is a libtool convenience library, so add in its objects. 
+ func_append convenience " $ladir/$objdir/$old_library" + func_append old_convenience " $ladir/$objdir/$old_library" + elif test "$linkmode" != prog && test "$linkmode" != lib; then + func_fatal_error "\`$lib' is not a convenience library" + fi + tmp_libs= + for deplib in $dependency_libs; do + deplibs="$deplib $deplibs" + if $opt_preserve_dup_deps ; then + case "$tmp_libs " in + *" $deplib "*) func_append specialdeplibs " $deplib" ;; + esac + fi + func_append tmp_libs " $deplib" + done + continue + fi # $pass = conv + + + # Get the name of the library we link against. + linklib= + if test -n "$old_library" && + { test "$prefer_static_libs" = yes || + test "$prefer_static_libs,$installed" = "built,no"; }; then + linklib=$old_library + else + for l in $old_library $library_names; do + linklib="$l" + done + fi + if test -z "$linklib"; then + func_fatal_error "cannot find name of link library for \`$lib'" + fi + + # This library was specified with -dlopen. + if test "$pass" = dlopen; then + if test -z "$libdir"; then + func_fatal_error "cannot -dlopen a convenience library: \`$lib'" + fi + if test -z "$dlname" || + test "$dlopen_support" != yes || + test "$build_libtool_libs" = no; then + # If there is no dlname, no dlopen support or we're linking + # statically, we need to preload. We also need to preload any + # dependent libraries so libltdl's deplib preloader doesn't + # bomb out in the load deplibs phase. + func_append dlprefiles " $lib $dependency_libs" + else + func_append newdlfiles " $lib" + fi + continue + fi # $pass = dlopen + + # We need an absolute path. 
+ case $ladir in + [\\/]* | [A-Za-z]:[\\/]*) abs_ladir="$ladir" ;; + *) + abs_ladir=`cd "$ladir" && pwd` + if test -z "$abs_ladir"; then + func_warning "cannot determine absolute directory name of \`$ladir'" + func_warning "passing it literally to the linker, although it might fail" + abs_ladir="$ladir" + fi + ;; + esac + func_basename "$lib" + laname="$func_basename_result" + + # Find the relevant object directory and library name. + if test "X$installed" = Xyes; then + if test ! -f "$lt_sysroot$libdir/$linklib" && test -f "$abs_ladir/$linklib"; then + func_warning "library \`$lib' was moved." + dir="$ladir" + absdir="$abs_ladir" + libdir="$abs_ladir" + else + dir="$lt_sysroot$libdir" + absdir="$lt_sysroot$libdir" + fi + test "X$hardcode_automatic" = Xyes && avoidtemprpath=yes + else + if test ! -f "$ladir/$objdir/$linklib" && test -f "$abs_ladir/$linklib"; then + dir="$ladir" + absdir="$abs_ladir" + # Remove this search path later + func_append notinst_path " $abs_ladir" + else + dir="$ladir/$objdir" + absdir="$abs_ladir/$objdir" + # Remove this search path later + func_append notinst_path " $abs_ladir" + fi + fi # $installed = yes + func_stripname 'lib' '.la' "$laname" + name=$func_stripname_result + + # This library was specified with -dlpreopen. + if test "$pass" = dlpreopen; then + if test -z "$libdir" && test "$linkmode" = prog; then + func_fatal_error "only libraries may -dlpreopen a convenience library: \`$lib'" + fi + case "$host" in + # special handling for platforms with PE-DLLs. + *cygwin* | *mingw* | *cegcc* ) + # Linker will automatically link against shared library if both + # static and shared are present. Therefore, ensure we extract + # symbols from the import library if a shared library is present + # (otherwise, the dlopen module name will be incorrect). We do + # this by putting the import library name into $newdlprefiles. 
+ # We recover the dlopen module name by 'saving' the la file + # name in a special purpose variable, and (later) extracting the + # dlname from the la file. + if test -n "$dlname"; then + func_tr_sh "$dir/$linklib" + eval "libfile_$func_tr_sh_result=\$abs_ladir/\$laname" + func_append newdlprefiles " $dir/$linklib" + else + func_append newdlprefiles " $dir/$old_library" + # Keep a list of preopened convenience libraries to check + # that they are being used correctly in the link pass. + test -z "$libdir" && \ + func_append dlpreconveniencelibs " $dir/$old_library" + fi + ;; + * ) + # Prefer using a static library (so that no silly _DYNAMIC symbols + # are required to link). + if test -n "$old_library"; then + func_append newdlprefiles " $dir/$old_library" + # Keep a list of preopened convenience libraries to check + # that they are being used correctly in the link pass. + test -z "$libdir" && \ + func_append dlpreconveniencelibs " $dir/$old_library" + # Otherwise, use the dlname, so that lt_dlopen finds it. 
+ elif test -n "$dlname"; then + func_append newdlprefiles " $dir/$dlname" + else + func_append newdlprefiles " $dir/$linklib" + fi + ;; + esac + fi # $pass = dlpreopen + + if test -z "$libdir"; then + # Link the convenience library + if test "$linkmode" = lib; then + deplibs="$dir/$old_library $deplibs" + elif test "$linkmode,$pass" = "prog,link"; then + compile_deplibs="$dir/$old_library $compile_deplibs" + finalize_deplibs="$dir/$old_library $finalize_deplibs" + else + deplibs="$lib $deplibs" # used for prog,scan pass + fi + continue + fi + + + if test "$linkmode" = prog && test "$pass" != link; then + func_append newlib_search_path " $ladir" + deplibs="$lib $deplibs" + + linkalldeplibs=no + if test "$link_all_deplibs" != no || test -z "$library_names" || + test "$build_libtool_libs" = no; then + linkalldeplibs=yes + fi + + tmp_libs= + for deplib in $dependency_libs; do + case $deplib in + -L*) func_stripname '-L' '' "$deplib" + func_resolve_sysroot "$func_stripname_result" + func_append newlib_search_path " $func_resolve_sysroot_result" + ;; + esac + # Need to link against all dependency_libs? + if test "$linkalldeplibs" = yes; then + deplibs="$deplib $deplibs" + else + # Need to hardcode shared library paths + # or/and link against static libraries + newdependency_libs="$deplib $newdependency_libs" + fi + if $opt_preserve_dup_deps ; then + case "$tmp_libs " in + *" $deplib "*) func_append specialdeplibs " $deplib" ;; + esac + fi + func_append tmp_libs " $deplib" + done # for deplib + continue + fi # $linkmode = prog... + + if test "$linkmode,$pass" = "prog,link"; then + if test -n "$library_names" && + { { test "$prefer_static_libs" = no || + test "$prefer_static_libs,$installed" = "built,yes"; } || + test -z "$old_library"; }; then + # We need to hardcode the library path + if test -n "$shlibpath_var" && test -z "$avoidtemprpath" ; then + # Make sure the rpath contains only unique directories. 
+ case "$temp_rpath:" in + *"$absdir:"*) ;; + *) func_append temp_rpath "$absdir:" ;; + esac + fi + + # Hardcode the library path. + # Skip directories that are in the system default run-time + # search path. + case " $sys_lib_dlsearch_path " in + *" $absdir "*) ;; + *) + case "$compile_rpath " in + *" $absdir "*) ;; + *) func_append compile_rpath " $absdir" ;; + esac + ;; + esac + case " $sys_lib_dlsearch_path " in + *" $libdir "*) ;; + *) + case "$finalize_rpath " in + *" $libdir "*) ;; + *) func_append finalize_rpath " $libdir" ;; + esac + ;; + esac + fi # $linkmode,$pass = prog,link... + + if test "$alldeplibs" = yes && + { test "$deplibs_check_method" = pass_all || + { test "$build_libtool_libs" = yes && + test -n "$library_names"; }; }; then + # We only need to search for static libraries + continue + fi + fi + + link_static=no # Whether the deplib will be linked statically + use_static_libs=$prefer_static_libs + if test "$use_static_libs" = built && test "$installed" = yes; then + use_static_libs=no + fi + if test -n "$library_names" && + { test "$use_static_libs" = no || test -z "$old_library"; }; then + case $host in + *cygwin* | *mingw* | *cegcc*) + # No point in relinking DLLs because paths are not encoded + func_append notinst_deplibs " $lib" + need_relink=no + ;; + *) + if test "$installed" = no; then + func_append notinst_deplibs " $lib" + need_relink=yes + fi + ;; + esac + # This is a shared library + + # Warn about portability, can't link against -module's on some + # systems (darwin). Don't bleat about dlopened modules though! 
+ dlopenmodule="" + for dlpremoduletest in $dlprefiles; do + if test "X$dlpremoduletest" = "X$lib"; then + dlopenmodule="$dlpremoduletest" + break + fi + done + if test -z "$dlopenmodule" && test "$shouldnotlink" = yes && test "$pass" = link; then + echo + if test "$linkmode" = prog; then + $ECHO "*** Warning: Linking the executable $output against the loadable module" + else + $ECHO "*** Warning: Linking the shared library $output against the loadable module" + fi + $ECHO "*** $linklib is not portable!" + fi + if test "$linkmode" = lib && + test "$hardcode_into_libs" = yes; then + # Hardcode the library path. + # Skip directories that are in the system default run-time + # search path. + case " $sys_lib_dlsearch_path " in + *" $absdir "*) ;; + *) + case "$compile_rpath " in + *" $absdir "*) ;; + *) func_append compile_rpath " $absdir" ;; + esac + ;; + esac + case " $sys_lib_dlsearch_path " in + *" $libdir "*) ;; + *) + case "$finalize_rpath " in + *" $libdir "*) ;; + *) func_append finalize_rpath " $libdir" ;; + esac + ;; + esac + fi + + if test -n "$old_archive_from_expsyms_cmds"; then + # figure out the soname + set dummy $library_names + shift + realname="$1" + shift + libname=`eval "\\$ECHO \"$libname_spec\""` + # use dlname if we got it. it's perfectly good, no? 
+ if test -n "$dlname"; then + soname="$dlname" + elif test -n "$soname_spec"; then + # bleh windows + case $host in + *cygwin* | mingw* | *cegcc*) + func_arith $current - $age + major=$func_arith_result + versuffix="-$major" + ;; + esac + eval soname=\"$soname_spec\" + else + soname="$realname" + fi + + # Make a new name for the extract_expsyms_cmds to use + soroot="$soname" + func_basename "$soroot" + soname="$func_basename_result" + func_stripname 'lib' '.dll' "$soname" + newlib=libimp-$func_stripname_result.a + + # If the library has no export list, then create one now + if test -f "$output_objdir/$soname-def"; then : + else + func_verbose "extracting exported symbol list from \`$soname'" + func_execute_cmds "$extract_expsyms_cmds" 'exit $?' + fi + + # Create $newlib + if test -f "$output_objdir/$newlib"; then :; else + func_verbose "generating import library for \`$soname'" + func_execute_cmds "$old_archive_from_expsyms_cmds" 'exit $?' + fi + # make sure the library variables are pointing to the new library + dir=$output_objdir + linklib=$newlib + fi # test -n "$old_archive_from_expsyms_cmds" + + if test "$linkmode" = prog || test "$opt_mode" != relink; then + add_shlibpath= + add_dir= + add= + lib_linked=yes + case $hardcode_action in + immediate | unsupported) + if test "$hardcode_direct" = no; then + add="$dir/$linklib" + case $host in + *-*-sco3.2v5.0.[024]*) add_dir="-L$dir" ;; + *-*-sysv4*uw2*) add_dir="-L$dir" ;; + *-*-sysv5OpenUNIX* | *-*-sysv5UnixWare7.[01].[10]* | \ + *-*-unixware7*) add_dir="-L$dir" ;; + *-*-darwin* ) + # if the lib is a (non-dlopened) module then we can not + # link against it, someone is ignoring the earlier warnings + if /usr/bin/file -L $add 2> /dev/null | + $GREP ": [^:]* bundle" >/dev/null ; then + if test "X$dlopenmodule" != "X$lib"; then + $ECHO "*** Warning: lib $linklib is a module, not a shared library" + if test -z "$old_library" ; then + echo + echo "*** And there doesn't seem to be a static archive available" + echo 
"*** The link will probably fail, sorry" + else + add="$dir/$old_library" + fi + elif test -n "$old_library"; then + add="$dir/$old_library" + fi + fi + esac + elif test "$hardcode_minus_L" = no; then + case $host in + *-*-sunos*) add_shlibpath="$dir" ;; + esac + add_dir="-L$dir" + add="-l$name" + elif test "$hardcode_shlibpath_var" = no; then + add_shlibpath="$dir" + add="-l$name" + else + lib_linked=no + fi + ;; + relink) + if test "$hardcode_direct" = yes && + test "$hardcode_direct_absolute" = no; then + add="$dir/$linklib" + elif test "$hardcode_minus_L" = yes; then + add_dir="-L$absdir" + # Try looking first in the location we're being installed to. + if test -n "$inst_prefix_dir"; then + case $libdir in + [\\/]*) + func_append add_dir " -L$inst_prefix_dir$libdir" + ;; + esac + fi + add="-l$name" + elif test "$hardcode_shlibpath_var" = yes; then + add_shlibpath="$dir" + add="-l$name" + else + lib_linked=no + fi + ;; + *) lib_linked=no ;; + esac + + if test "$lib_linked" != yes; then + func_fatal_configuration "unsupported hardcode properties" + fi + + if test -n "$add_shlibpath"; then + case :$compile_shlibpath: in + *":$add_shlibpath:"*) ;; + *) func_append compile_shlibpath "$add_shlibpath:" ;; + esac + fi + if test "$linkmode" = prog; then + test -n "$add_dir" && compile_deplibs="$add_dir $compile_deplibs" + test -n "$add" && compile_deplibs="$add $compile_deplibs" + else + test -n "$add_dir" && deplibs="$add_dir $deplibs" + test -n "$add" && deplibs="$add $deplibs" + if test "$hardcode_direct" != yes && + test "$hardcode_minus_L" != yes && + test "$hardcode_shlibpath_var" = yes; then + case :$finalize_shlibpath: in + *":$libdir:"*) ;; + *) func_append finalize_shlibpath "$libdir:" ;; + esac + fi + fi + fi + + if test "$linkmode" = prog || test "$opt_mode" = relink; then + add_shlibpath= + add_dir= + add= + # Finalize command for both is simple: just hardcode it. 
+ if test "$hardcode_direct" = yes && + test "$hardcode_direct_absolute" = no; then + add="$libdir/$linklib" + elif test "$hardcode_minus_L" = yes; then + add_dir="-L$libdir" + add="-l$name" + elif test "$hardcode_shlibpath_var" = yes; then + case :$finalize_shlibpath: in + *":$libdir:"*) ;; + *) func_append finalize_shlibpath "$libdir:" ;; + esac + add="-l$name" + elif test "$hardcode_automatic" = yes; then + if test -n "$inst_prefix_dir" && + test -f "$inst_prefix_dir$libdir/$linklib" ; then + add="$inst_prefix_dir$libdir/$linklib" + else + add="$libdir/$linklib" + fi + else + # We cannot seem to hardcode it, guess we'll fake it. + add_dir="-L$libdir" + # Try looking first in the location we're being installed to. + if test -n "$inst_prefix_dir"; then + case $libdir in + [\\/]*) + func_append add_dir " -L$inst_prefix_dir$libdir" + ;; + esac + fi + add="-l$name" + fi + + if test "$linkmode" = prog; then + test -n "$add_dir" && finalize_deplibs="$add_dir $finalize_deplibs" + test -n "$add" && finalize_deplibs="$add $finalize_deplibs" + else + test -n "$add_dir" && deplibs="$add_dir $deplibs" + test -n "$add" && deplibs="$add $deplibs" + fi + fi + elif test "$linkmode" = prog; then + # Here we assume that one of hardcode_direct or hardcode_minus_L + # is not unsupported. This is valid on all known static and + # shared platforms. + if test "$hardcode_direct" != unsupported; then + test -n "$old_library" && linklib="$old_library" + compile_deplibs="$dir/$linklib $compile_deplibs" + finalize_deplibs="$dir/$linklib $finalize_deplibs" + else + compile_deplibs="-l$name -L$dir $compile_deplibs" + finalize_deplibs="-l$name -L$dir $finalize_deplibs" + fi + elif test "$build_libtool_libs" = yes; then + # Not a shared library + if test "$deplibs_check_method" != pass_all; then + # We're trying link a shared library against a static one + # but the system doesn't support it. 
+ + # Just print a warning and add the library to dependency_libs so + # that the program can be linked against the static library. + echo + $ECHO "*** Warning: This system can not link to static lib archive $lib." + echo "*** I have the capability to make that library automatically link in when" + echo "*** you link to this library. But I can only do this if you have a" + echo "*** shared version of the library, which you do not appear to have." + if test "$module" = yes; then + echo "*** But as you try to build a module library, libtool will still create " + echo "*** a static module, that should work as long as the dlopening application" + echo "*** is linked with the -dlopen flag to resolve symbols at runtime." + if test -z "$global_symbol_pipe"; then + echo + echo "*** However, this would only work if libtool was able to extract symbol" + echo "*** lists from a program, using \`nm' or equivalent, but libtool could" + echo "*** not find such a program. So, this module is probably useless." + echo "*** \`nm' from GNU binutils and a full rebuild may help." + fi + if test "$build_old_libs" = no; then + build_libtool_libs=module + build_old_libs=yes + else + build_libtool_libs=no + fi + fi + else + deplibs="$dir/$old_library $deplibs" + link_static=yes + fi + fi # link shared/static library? 
+ + if test "$linkmode" = lib; then + if test -n "$dependency_libs" && + { test "$hardcode_into_libs" != yes || + test "$build_old_libs" = yes || + test "$link_static" = yes; }; then + # Extract -R from dependency_libs + temp_deplibs= + for libdir in $dependency_libs; do + case $libdir in + -R*) func_stripname '-R' '' "$libdir" + temp_xrpath=$func_stripname_result + case " $xrpath " in + *" $temp_xrpath "*) ;; + *) func_append xrpath " $temp_xrpath";; + esac;; + *) func_append temp_deplibs " $libdir";; + esac + done + dependency_libs="$temp_deplibs" + fi + + func_append newlib_search_path " $absdir" + # Link against this library + test "$link_static" = no && newdependency_libs="$abs_ladir/$laname $newdependency_libs" + # ... and its dependency_libs + tmp_libs= + for deplib in $dependency_libs; do + newdependency_libs="$deplib $newdependency_libs" + case $deplib in + -L*) func_stripname '-L' '' "$deplib" + func_resolve_sysroot "$func_stripname_result";; + *) func_resolve_sysroot "$deplib" ;; + esac + if $opt_preserve_dup_deps ; then + case "$tmp_libs " in + *" $func_resolve_sysroot_result "*) + func_append specialdeplibs " $func_resolve_sysroot_result" ;; + esac + fi + func_append tmp_libs " $func_resolve_sysroot_result" + done + + if test "$link_all_deplibs" != no; then + # Add the search paths of all dependency libraries + for deplib in $dependency_libs; do + path= + case $deplib in + -L*) path="$deplib" ;; + *.la) + func_resolve_sysroot "$deplib" + deplib=$func_resolve_sysroot_result + func_dirname "$deplib" "" "." + dir=$func_dirname_result + # We need an absolute path. 
+ case $dir in + [\\/]* | [A-Za-z]:[\\/]*) absdir="$dir" ;; + *) + absdir=`cd "$dir" && pwd` + if test -z "$absdir"; then + func_warning "cannot determine absolute directory name of \`$dir'" + absdir="$dir" + fi + ;; + esac + if $GREP "^installed=no" $deplib > /dev/null; then + case $host in + *-*-darwin*) + depdepl= + eval deplibrary_names=`${SED} -n -e 's/^library_names=\(.*\)$/\1/p' $deplib` + if test -n "$deplibrary_names" ; then + for tmp in $deplibrary_names ; do + depdepl=$tmp + done + if test -f "$absdir/$objdir/$depdepl" ; then + depdepl="$absdir/$objdir/$depdepl" + darwin_install_name=`${OTOOL} -L $depdepl | awk '{if (NR == 2) {print $1;exit}}'` + if test -z "$darwin_install_name"; then + darwin_install_name=`${OTOOL64} -L $depdepl | awk '{if (NR == 2) {print $1;exit}}'` + fi + func_append compiler_flags " ${wl}-dylib_file ${wl}${darwin_install_name}:${depdepl}" + func_append linker_flags " -dylib_file ${darwin_install_name}:${depdepl}" + path= + fi + fi + ;; + *) + path="-L$absdir/$objdir" + ;; + esac + else + eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $deplib` + test -z "$libdir" && \ + func_fatal_error "\`$deplib' is not a valid libtool archive" + test "$absdir" != "$libdir" && \ + func_warning "\`$deplib' seems to be moved" + + path="-L$absdir" + fi + ;; + esac + case " $deplibs " in + *" $path "*) ;; + *) deplibs="$path $deplibs" ;; + esac + done + fi # link_all_deplibs != no + fi # linkmode = lib + done # for deplib in $libs + if test "$pass" = link; then + if test "$linkmode" = "prog"; then + compile_deplibs="$new_inherited_linker_flags $compile_deplibs" + finalize_deplibs="$new_inherited_linker_flags $finalize_deplibs" + else + compiler_flags="$compiler_flags "`$ECHO " $new_inherited_linker_flags" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + fi + fi + dependency_libs="$newdependency_libs" + if test "$pass" = dlpreopen; then + # Link the dlpreopened libraries before other libraries + for deplib in $save_deplibs; do + 
deplibs="$deplib $deplibs" + done + fi + if test "$pass" != dlopen; then + if test "$pass" != conv; then + # Make sure lib_search_path contains only unique directories. + lib_search_path= + for dir in $newlib_search_path; do + case "$lib_search_path " in + *" $dir "*) ;; + *) func_append lib_search_path " $dir" ;; + esac + done + newlib_search_path= + fi + + if test "$linkmode,$pass" != "prog,link"; then + vars="deplibs" + else + vars="compile_deplibs finalize_deplibs" + fi + for var in $vars dependency_libs; do + # Add libraries to $var in reverse order + eval tmp_libs=\"\$$var\" + new_libs= + for deplib in $tmp_libs; do + # FIXME: Pedantically, this is the right thing to do, so + # that some nasty dependency loop isn't accidentally + # broken: + #new_libs="$deplib $new_libs" + # Pragmatically, this seems to cause very few problems in + # practice: + case $deplib in + -L*) new_libs="$deplib $new_libs" ;; + -R*) ;; + *) + # And here is the reason: when a library appears more + # than once as an explicit dependence of a library, or + # is implicitly linked in more than once by the + # compiler, it is considered special, and multiple + # occurrences thereof are not removed. Compare this + # with having the same library being listed as a + # dependency of multiple other libraries: in this case, + # we know (pedantically, we assume) the library does not + # need to be listed more than once, so we keep only the + # last copy. This is not always right, but it is rare + # enough that we require users that really mean to play + # such unportable linking tricks to link the library + # using -Wl,-lname, so that libtool does not consider it + # for duplicate removal. 
+ case " $specialdeplibs " in + *" $deplib "*) new_libs="$deplib $new_libs" ;; + *) + case " $new_libs " in + *" $deplib "*) ;; + *) new_libs="$deplib $new_libs" ;; + esac + ;; + esac + ;; + esac + done + tmp_libs= + for deplib in $new_libs; do + case $deplib in + -L*) + case " $tmp_libs " in + *" $deplib "*) ;; + *) func_append tmp_libs " $deplib" ;; + esac + ;; + *) func_append tmp_libs " $deplib" ;; + esac + done + eval $var=\"$tmp_libs\" + done # for var + fi + # Last step: remove runtime libs from dependency_libs + # (they stay in deplibs) + tmp_libs= + for i in $dependency_libs ; do + case " $predeps $postdeps $compiler_lib_search_path " in + *" $i "*) + i="" + ;; + esac + if test -n "$i" ; then + func_append tmp_libs " $i" + fi + done + dependency_libs=$tmp_libs + done # for pass + if test "$linkmode" = prog; then + dlfiles="$newdlfiles" + fi + if test "$linkmode" = prog || test "$linkmode" = lib; then + dlprefiles="$newdlprefiles" + fi + + case $linkmode in + oldlib) + if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then + func_warning "\`-dlopen' is ignored for archives" + fi + + case " $deplibs" in + *\ -l* | *\ -L*) + func_warning "\`-l' and \`-L' are ignored for archives" ;; + esac + + test -n "$rpath" && \ + func_warning "\`-rpath' is ignored for archives" + + test -n "$xrpath" && \ + func_warning "\`-R' is ignored for archives" + + test -n "$vinfo" && \ + func_warning "\`-version-info/-version-number' is ignored for archives" + + test -n "$release" && \ + func_warning "\`-release' is ignored for archives" + + test -n "$export_symbols$export_symbols_regex" && \ + func_warning "\`-export-symbols' is ignored for archives" + + # Now set the variables for building old libraries. + build_libtool_libs=no + oldlibs="$output" + func_append objs "$old_deplibs" + ;; + + lib) + # Make sure we only generate libraries of the form `libNAME.la'. 
+ case $outputname in + lib*) + func_stripname 'lib' '.la' "$outputname" + name=$func_stripname_result + eval shared_ext=\"$shrext_cmds\" + eval libname=\"$libname_spec\" + ;; + *) + test "$module" = no && \ + func_fatal_help "libtool library \`$output' must begin with \`lib'" + + if test "$need_lib_prefix" != no; then + # Add the "lib" prefix for modules if required + func_stripname '' '.la' "$outputname" + name=$func_stripname_result + eval shared_ext=\"$shrext_cmds\" + eval libname=\"$libname_spec\" + else + func_stripname '' '.la' "$outputname" + libname=$func_stripname_result + fi + ;; + esac + + if test -n "$objs"; then + if test "$deplibs_check_method" != pass_all; then + func_fatal_error "cannot build libtool library \`$output' from non-libtool objects on this host:$objs" + else + echo + $ECHO "*** Warning: Linking the shared library $output against the non-libtool" + $ECHO "*** objects $objs is not portable!" + func_append libobjs " $objs" + fi + fi + + test "$dlself" != no && \ + func_warning "\`-dlopen self' is ignored for libtool libraries" + + set dummy $rpath + shift + test "$#" -gt 1 && \ + func_warning "ignoring multiple \`-rpath's for a libtool library" + + install_libdir="$1" + + oldlibs= + if test -z "$rpath"; then + if test "$build_libtool_libs" = yes; then + # Building a libtool convenience library. + # Some compilers have problems with a `.al' extension so + # convenience libraries should have the same extension an + # archive normally would. + oldlibs="$output_objdir/$libname.$libext $oldlibs" + build_libtool_libs=convenience + build_old_libs=yes + fi + + test -n "$vinfo" && \ + func_warning "\`-version-info/-version-number' is ignored for convenience libraries" + + test -n "$release" && \ + func_warning "\`-release' is ignored for convenience libraries" + else + + # Parse the version information argument. 
+ save_ifs="$IFS"; IFS=':' + set dummy $vinfo 0 0 0 + shift + IFS="$save_ifs" + + test -n "$7" && \ + func_fatal_help "too many parameters to \`-version-info'" + + # convert absolute version numbers to libtool ages + # this retains compatibility with .la files and attempts + # to make the code below a bit more comprehensible + + case $vinfo_number in + yes) + number_major="$1" + number_minor="$2" + number_revision="$3" + # + # There are really only two kinds -- those that + # use the current revision as the major version + # and those that subtract age and use age as + # a minor version. But, then there is irix + # which has an extra 1 added just for fun + # + case $version_type in + # correct linux to gnu/linux during the next big refactor + darwin|linux|osf|windows|none) + func_arith $number_major + $number_minor + current=$func_arith_result + age="$number_minor" + revision="$number_revision" + ;; + freebsd-aout|freebsd-elf|qnx|sunos) + current="$number_major" + revision="$number_minor" + age="0" + ;; + irix|nonstopux) + func_arith $number_major + $number_minor + current=$func_arith_result + age="$number_minor" + revision="$number_minor" + lt_irix_increment=no + ;; + esac + ;; + no) + current="$1" + revision="$2" + age="$3" + ;; + esac + + # Check that each of the things are valid numbers. 
+ case $current in + 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; + *) + func_error "CURRENT \`$current' must be a nonnegative integer" + func_fatal_error "\`$vinfo' is not valid version information" + ;; + esac + + case $revision in + 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; + *) + func_error "REVISION \`$revision' must be a nonnegative integer" + func_fatal_error "\`$vinfo' is not valid version information" + ;; + esac + + case $age in + 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; + *) + func_error "AGE \`$age' must be a nonnegative integer" + func_fatal_error "\`$vinfo' is not valid version information" + ;; + esac + + if test "$age" -gt "$current"; then + func_error "AGE \`$age' is greater than the current interface number \`$current'" + func_fatal_error "\`$vinfo' is not valid version information" + fi + + # Calculate the version variables. + major= + versuffix= + verstring= + case $version_type in + none) ;; + + darwin) + # Like Linux, but with the current version available in + # verstring for coding it into the library header + func_arith $current - $age + major=.$func_arith_result + versuffix="$major.$age.$revision" + # Darwin ld doesn't like 0 for these options... 
+ func_arith $current + 1 + minor_current=$func_arith_result + xlcverstring="${wl}-compatibility_version ${wl}$minor_current ${wl}-current_version ${wl}$minor_current.$revision" + verstring="-compatibility_version $minor_current -current_version $minor_current.$revision" + ;; + + freebsd-aout) + major=".$current" + versuffix=".$current.$revision"; + ;; + + freebsd-elf) + major=".$current" + versuffix=".$current" + ;; + + irix | nonstopux) + if test "X$lt_irix_increment" = "Xno"; then + func_arith $current - $age + else + func_arith $current - $age + 1 + fi + major=$func_arith_result + + case $version_type in + nonstopux) verstring_prefix=nonstopux ;; + *) verstring_prefix=sgi ;; + esac + verstring="$verstring_prefix$major.$revision" + + # Add in all the interfaces that we are compatible with. + loop=$revision + while test "$loop" -ne 0; do + func_arith $revision - $loop + iface=$func_arith_result + func_arith $loop - 1 + loop=$func_arith_result + verstring="$verstring_prefix$major.$iface:$verstring" + done + + # Before this point, $major must not contain `.'. + major=.$major + versuffix="$major.$revision" + ;; + + linux) # correct to gnu/linux during the next big refactor + func_arith $current - $age + major=.$func_arith_result + versuffix="$major.$age.$revision" + ;; + + osf) + func_arith $current - $age + major=.$func_arith_result + versuffix=".$current.$age.$revision" + verstring="$current.$age.$revision" + + # Add in all the interfaces that we are compatible with. + loop=$age + while test "$loop" -ne 0; do + func_arith $current - $loop + iface=$func_arith_result + func_arith $loop - 1 + loop=$func_arith_result + verstring="$verstring:${iface}.0" + done + + # Make executables depend on our current version. 
+ func_append verstring ":${current}.0" + ;; + + qnx) + major=".$current" + versuffix=".$current" + ;; + + sunos) + major=".$current" + versuffix=".$current.$revision" + ;; + + windows) + # Use '-' rather than '.', since we only want one + # extension on DOS 8.3 filesystems. + func_arith $current - $age + major=$func_arith_result + versuffix="-$major" + ;; + + *) + func_fatal_configuration "unknown library version type \`$version_type'" + ;; + esac + + # Clear the version info if we defaulted, and they specified a release. + if test -z "$vinfo" && test -n "$release"; then + major= + case $version_type in + darwin) + # we can't check for "0.0" in archive_cmds due to quoting + # problems, so we reset it completely + verstring= + ;; + *) + verstring="0.0" + ;; + esac + if test "$need_version" = no; then + versuffix= + else + versuffix=".0.0" + fi + fi + + # Remove version info from name if versioning should be avoided + if test "$avoid_version" = yes && test "$need_version" = no; then + major= + versuffix= + verstring="" + fi + + # Check to see if the archive will have undefined symbols. + if test "$allow_undefined" = yes; then + if test "$allow_undefined_flag" = unsupported; then + func_warning "undefined symbols not allowed in $host shared libraries" + build_libtool_libs=no + build_old_libs=yes + fi + else + # Don't allow undefined symbols. + allow_undefined_flag="$no_undefined_flag" + fi + + fi + + func_generate_dlsyms "$libname" "$libname" "yes" + func_append libobjs " $symfileobj" + test "X$libobjs" = "X " && libobjs= + + if test "$opt_mode" != relink; then + # Remove our outputs, but don't remove object files since they + # may have been created when compiling PIC objects. 
+ removelist= + tempremovelist=`$ECHO "$output_objdir/*"` + for p in $tempremovelist; do + case $p in + *.$objext | *.gcno) + ;; + $output_objdir/$outputname | $output_objdir/$libname.* | $output_objdir/${libname}${release}.*) + if test "X$precious_files_regex" != "X"; then + if $ECHO "$p" | $EGREP -e "$precious_files_regex" >/dev/null 2>&1 + then + continue + fi + fi + func_append removelist " $p" + ;; + *) ;; + esac + done + test -n "$removelist" && \ + func_show_eval "${RM}r \$removelist" + fi + + # Now set the variables for building old libraries. + if test "$build_old_libs" = yes && test "$build_libtool_libs" != convenience ; then + func_append oldlibs " $output_objdir/$libname.$libext" + + # Transform .lo files to .o files. + oldobjs="$objs "`$ECHO "$libobjs" | $SP2NL | $SED "/\.${libext}$/d; $lo2o" | $NL2SP` + fi + + # Eliminate all temporary directories. + #for path in $notinst_path; do + # lib_search_path=`$ECHO "$lib_search_path " | $SED "s% $path % %g"` + # deplibs=`$ECHO "$deplibs " | $SED "s% -L$path % %g"` + # dependency_libs=`$ECHO "$dependency_libs " | $SED "s% -L$path % %g"` + #done + + if test -n "$xrpath"; then + # If the user specified any rpath flags, then add them. 
+ temp_xrpath= + for libdir in $xrpath; do + func_replace_sysroot "$libdir" + func_append temp_xrpath " -R$func_replace_sysroot_result" + case "$finalize_rpath " in + *" $libdir "*) ;; + *) func_append finalize_rpath " $libdir" ;; + esac + done + if test "$hardcode_into_libs" != yes || test "$build_old_libs" = yes; then + dependency_libs="$temp_xrpath $dependency_libs" + fi + fi + + # Make sure dlfiles contains only unique files that won't be dlpreopened + old_dlfiles="$dlfiles" + dlfiles= + for lib in $old_dlfiles; do + case " $dlprefiles $dlfiles " in + *" $lib "*) ;; + *) func_append dlfiles " $lib" ;; + esac + done + + # Make sure dlprefiles contains only unique files + old_dlprefiles="$dlprefiles" + dlprefiles= + for lib in $old_dlprefiles; do + case "$dlprefiles " in + *" $lib "*) ;; + *) func_append dlprefiles " $lib" ;; + esac + done + + if test "$build_libtool_libs" = yes; then + if test -n "$rpath"; then + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-beos* | *-cegcc* | *-*-haiku*) + # these systems don't actually have a c library (as such)! + ;; + *-*-rhapsody* | *-*-darwin1.[012]) + # Rhapsody C library is in the System framework + func_append deplibs " System.ltframework" + ;; + *-*-netbsd*) + # Don't link with libc until the a.out ld.so is fixed. + ;; + *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*) + # Do not include libc due to us having libc/libc_r. + ;; + *-*-sco3.2v5* | *-*-sco5v6*) + # Causes problems with __ctype + ;; + *-*-sysv4.2uw2* | *-*-sysv5* | *-*-unixware* | *-*-OpenUNIX*) + # Compiler inserts libc in the correct place for threads to work + ;; + *) + # Add libc to deplibs on all other systems if necessary. + if test "$build_libtool_need_lc" = "yes"; then + func_append deplibs " -lc" + fi + ;; + esac + fi + + # Transform deplibs into only deplibs that can be linked in shared. 
+ name_save=$name + libname_save=$libname + release_save=$release + versuffix_save=$versuffix + major_save=$major + # I'm not sure if I'm treating the release correctly. I think + # release should show up in the -l (ie -lgmp5) so we don't want to + # add it in twice. Is that correct? + release="" + versuffix="" + major="" + newdeplibs= + droppeddeps=no + case $deplibs_check_method in + pass_all) + # Don't check for shared/static. Everything works. + # This might be a little naive. We might want to check + # whether the library exists or not. But this is on + # osf3 & osf4 and I'm not really sure... Just + # implementing what was already the behavior. + newdeplibs=$deplibs + ;; + test_compile) + # This code stresses the "libraries are programs" paradigm to its + # limits. Maybe even breaks it. We compile a program, linking it + # against the deplibs as a proxy for the library. Then we can check + # whether they linked in statically or dynamically with ldd. + $opt_dry_run || $RM conftest.c + cat > conftest.c </dev/null` + $nocaseglob + else + potential_libs=`ls $i/$libnameglob[.-]* 2>/dev/null` + fi + for potent_lib in $potential_libs; do + # Follow soft links. + if ls -lLd "$potent_lib" 2>/dev/null | + $GREP " -> " >/dev/null; then + continue + fi + # The statement above tries to avoid entering an + # endless loop below, in case of cyclic links. + # We might still enter an endless loop, since a link + # loop can be closed while we follow links, + # but so what? 
+ potlib="$potent_lib" + while test -h "$potlib" 2>/dev/null; do + potliblink=`ls -ld $potlib | ${SED} 's/.* -> //'` + case $potliblink in + [\\/]* | [A-Za-z]:[\\/]*) potlib="$potliblink";; + *) potlib=`$ECHO "$potlib" | $SED 's,[^/]*$,,'`"$potliblink";; + esac + done + if eval $file_magic_cmd \"\$potlib\" 2>/dev/null | + $SED -e 10q | + $EGREP "$file_magic_regex" > /dev/null; then + func_append newdeplibs " $a_deplib" + a_deplib="" + break 2 + fi + done + done + fi + if test -n "$a_deplib" ; then + droppeddeps=yes + echo + $ECHO "*** Warning: linker path does not have real file for library $a_deplib." + echo "*** I have the capability to make that library automatically link in when" + echo "*** you link to this library. But I can only do this if you have a" + echo "*** shared version of the library, which you do not appear to have" + echo "*** because I did check the linker path looking for a file starting" + if test -z "$potlib" ; then + $ECHO "*** with $libname but no candidates were found. (...for file magic test)" + else + $ECHO "*** with $libname and none of the candidates passed a file format test" + $ECHO "*** using a file magic. Last file checked: $potlib" + fi + fi + ;; + *) + # Add a -L argument. + func_append newdeplibs " $a_deplib" + ;; + esac + done # Gone through all deplibs. 
+ ;; + match_pattern*) + set dummy $deplibs_check_method; shift + match_pattern_regex=`expr "$deplibs_check_method" : "$1 \(.*\)"` + for a_deplib in $deplibs; do + case $a_deplib in + -l*) + func_stripname -l '' "$a_deplib" + name=$func_stripname_result + if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then + case " $predeps $postdeps " in + *" $a_deplib "*) + func_append newdeplibs " $a_deplib" + a_deplib="" + ;; + esac + fi + if test -n "$a_deplib" ; then + libname=`eval "\\$ECHO \"$libname_spec\""` + for i in $lib_search_path $sys_lib_search_path $shlib_search_path; do + potential_libs=`ls $i/$libname[.-]* 2>/dev/null` + for potent_lib in $potential_libs; do + potlib="$potent_lib" # see symlink-check above in file_magic test + if eval "\$ECHO \"$potent_lib\"" 2>/dev/null | $SED 10q | \ + $EGREP "$match_pattern_regex" > /dev/null; then + func_append newdeplibs " $a_deplib" + a_deplib="" + break 2 + fi + done + done + fi + if test -n "$a_deplib" ; then + droppeddeps=yes + echo + $ECHO "*** Warning: linker path does not have real file for library $a_deplib." + echo "*** I have the capability to make that library automatically link in when" + echo "*** you link to this library. But I can only do this if you have a" + echo "*** shared version of the library, which you do not appear to have" + echo "*** because I did check the linker path looking for a file starting" + if test -z "$potlib" ; then + $ECHO "*** with $libname but no candidates were found. (...for regex pattern test)" + else + $ECHO "*** with $libname and none of the candidates passed a file format test" + $ECHO "*** using a regex pattern. Last file checked: $potlib" + fi + fi + ;; + *) + # Add a -L argument. + func_append newdeplibs " $a_deplib" + ;; + esac + done # Gone through all deplibs. 
+ ;; + none | unknown | *) + newdeplibs="" + tmp_deplibs=`$ECHO " $deplibs" | $SED 's/ -lc$//; s/ -[LR][^ ]*//g'` + if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then + for i in $predeps $postdeps ; do + # can't use Xsed below, because $i might contain '/' + tmp_deplibs=`$ECHO " $tmp_deplibs" | $SED "s,$i,,"` + done + fi + case $tmp_deplibs in + *[!\ \ ]*) + echo + if test "X$deplibs_check_method" = "Xnone"; then + echo "*** Warning: inter-library dependencies are not supported in this platform." + else + echo "*** Warning: inter-library dependencies are not known to be supported." + fi + echo "*** All declared inter-library dependencies are being dropped." + droppeddeps=yes + ;; + esac + ;; + esac + versuffix=$versuffix_save + major=$major_save + release=$release_save + libname=$libname_save + name=$name_save + + case $host in + *-*-rhapsody* | *-*-darwin1.[012]) + # On Rhapsody replace the C library with the System framework + newdeplibs=`$ECHO " $newdeplibs" | $SED 's/ -lc / System.ltframework /'` + ;; + esac + + if test "$droppeddeps" = yes; then + if test "$module" = yes; then + echo + echo "*** Warning: libtool could not satisfy all declared inter-library" + $ECHO "*** dependencies of module $libname. Therefore, libtool will create" + echo "*** a static module, that should work as long as the dlopening" + echo "*** application is linked with the -dlopen flag." + if test -z "$global_symbol_pipe"; then + echo + echo "*** However, this would only work if libtool was able to extract symbol" + echo "*** lists from a program, using \`nm' or equivalent, but libtool could" + echo "*** not find such a program. So, this module is probably useless." + echo "*** \`nm' from GNU binutils and a full rebuild may help." 
+ fi + if test "$build_old_libs" = no; then + oldlibs="$output_objdir/$libname.$libext" + build_libtool_libs=module + build_old_libs=yes + else + build_libtool_libs=no + fi + else + echo "*** The inter-library dependencies that have been dropped here will be" + echo "*** automatically added whenever a program is linked with this library" + echo "*** or is declared to -dlopen it." + + if test "$allow_undefined" = no; then + echo + echo "*** Since this library must not contain undefined symbols," + echo "*** because either the platform does not support them or" + echo "*** it was explicitly requested with -no-undefined," + echo "*** libtool will only create a static version of it." + if test "$build_old_libs" = no; then + oldlibs="$output_objdir/$libname.$libext" + build_libtool_libs=module + build_old_libs=yes + else + build_libtool_libs=no + fi + fi + fi + fi + # Done checking deplibs! + deplibs=$newdeplibs + fi + # Time to change all our "foo.ltframework" stuff back to "-framework foo" + case $host in + *-*-darwin*) + newdeplibs=`$ECHO " $newdeplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + new_inherited_linker_flags=`$ECHO " $new_inherited_linker_flags" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + deplibs=`$ECHO " $deplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + ;; + esac + + # move library search paths that coincide with paths to not yet + # installed libraries to the beginning of the library search list + new_libs= + for path in $notinst_path; do + case " $new_libs " in + *" -L$path/$objdir "*) ;; + *) + case " $deplibs " in + *" -L$path/$objdir "*) + func_append new_libs " -L$path/$objdir" ;; + esac + ;; + esac + done + for deplib in $deplibs; do + case $deplib in + -L*) + case " $new_libs " in + *" $deplib "*) ;; + *) func_append new_libs " $deplib" ;; + esac + ;; + *) func_append new_libs " $deplib" ;; + esac + done + deplibs="$new_libs" + + # All the library-specific variables (install_libdir is set above). 
+ library_names= + old_library= + dlname= + + # Test again, we may have decided not to build it any more + if test "$build_libtool_libs" = yes; then + # Remove ${wl} instances when linking with ld. + # FIXME: should test the right _cmds variable. + case $archive_cmds in + *\$LD\ *) wl= ;; + esac + if test "$hardcode_into_libs" = yes; then + # Hardcode the library paths + hardcode_libdirs= + dep_rpath= + rpath="$finalize_rpath" + test "$opt_mode" != relink && rpath="$compile_rpath$rpath" + for libdir in $rpath; do + if test -n "$hardcode_libdir_flag_spec"; then + if test -n "$hardcode_libdir_separator"; then + func_replace_sysroot "$libdir" + libdir=$func_replace_sysroot_result + if test -z "$hardcode_libdirs"; then + hardcode_libdirs="$libdir" + else + # Just accumulate the unique libdirs. + case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in + *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) + ;; + *) + func_append hardcode_libdirs "$hardcode_libdir_separator$libdir" + ;; + esac + fi + else + eval flag=\"$hardcode_libdir_flag_spec\" + func_append dep_rpath " $flag" + fi + elif test -n "$runpath_var"; then + case "$perm_rpath " in + *" $libdir "*) ;; + *) func_append perm_rpath " $libdir" ;; + esac + fi + done + # Substitute the hardcoded libdirs into the rpath. + if test -n "$hardcode_libdir_separator" && + test -n "$hardcode_libdirs"; then + libdir="$hardcode_libdirs" + eval "dep_rpath=\"$hardcode_libdir_flag_spec\"" + fi + if test -n "$runpath_var" && test -n "$perm_rpath"; then + # We should set the runpath_var. 
+ rpath= + for dir in $perm_rpath; do + func_append rpath "$dir:" + done + eval "$runpath_var='$rpath\$$runpath_var'; export $runpath_var" + fi + test -n "$dep_rpath" && deplibs="$dep_rpath $deplibs" + fi + + shlibpath="$finalize_shlibpath" + test "$opt_mode" != relink && shlibpath="$compile_shlibpath$shlibpath" + if test -n "$shlibpath"; then + eval "$shlibpath_var='$shlibpath\$$shlibpath_var'; export $shlibpath_var" + fi + + # Get the real and link names of the library. + eval shared_ext=\"$shrext_cmds\" + eval library_names=\"$library_names_spec\" + set dummy $library_names + shift + realname="$1" + shift + + if test -n "$soname_spec"; then + eval soname=\"$soname_spec\" + else + soname="$realname" + fi + if test -z "$dlname"; then + dlname=$soname + fi + + lib="$output_objdir/$realname" + linknames= + for link + do + func_append linknames " $link" + done + + # Use standard objects if they are pic + test -z "$pic_flag" && libobjs=`$ECHO "$libobjs" | $SP2NL | $SED "$lo2o" | $NL2SP` + test "X$libobjs" = "X " && libobjs= + + delfiles= + if test -n "$export_symbols" && test -n "$include_expsyms"; then + $opt_dry_run || cp "$export_symbols" "$output_objdir/$libname.uexp" + export_symbols="$output_objdir/$libname.uexp" + func_append delfiles " $export_symbols" + fi + + orig_export_symbols= + case $host_os in + cygwin* | mingw* | cegcc*) + if test -n "$export_symbols" && test -z "$export_symbols_regex"; then + # exporting using user supplied symfile + if test "x`$SED 1q $export_symbols`" != xEXPORTS; then + # and it's NOT already a .def file. Must figure out + # which of the given symbols are data symbols and tag + # them as such. So, trigger use of export_symbols_cmds. + # export_symbols gets reassigned inside the "prepare + # the list of exported symbols" if statement, so the + # include_expsyms logic still works. 
+ orig_export_symbols="$export_symbols" + export_symbols= + always_export_symbols=yes + fi + fi + ;; + esac + + # Prepare the list of exported symbols + if test -z "$export_symbols"; then + if test "$always_export_symbols" = yes || test -n "$export_symbols_regex"; then + func_verbose "generating symbol list for \`$libname.la'" + export_symbols="$output_objdir/$libname.exp" + $opt_dry_run || $RM $export_symbols + cmds=$export_symbols_cmds + save_ifs="$IFS"; IFS='~' + for cmd1 in $cmds; do + IFS="$save_ifs" + # Take the normal branch if the nm_file_list_spec branch + # doesn't work or if tool conversion is not needed. + case $nm_file_list_spec~$to_tool_file_cmd in + *~func_convert_file_noop | *~func_convert_file_msys_to_w32 | ~*) + try_normal_branch=yes + eval cmd=\"$cmd1\" + func_len " $cmd" + len=$func_len_result + ;; + *) + try_normal_branch=no + ;; + esac + if test "$try_normal_branch" = yes \ + && { test "$len" -lt "$max_cmd_len" \ + || test "$max_cmd_len" -le -1; } + then + func_show_eval "$cmd" 'exit $?' + skipped_export=false + elif test -n "$nm_file_list_spec"; then + func_basename "$output" + output_la=$func_basename_result + save_libobjs=$libobjs + save_output=$output + output=${output_objdir}/${output_la}.nm + func_to_tool_file "$output" + libobjs=$nm_file_list_spec$func_to_tool_file_result + func_append delfiles " $output" + func_verbose "creating $NM input file list: $output" + for obj in $save_libobjs; do + func_to_tool_file "$obj" + $ECHO "$func_to_tool_file_result" + done > "$output" + eval cmd=\"$cmd1\" + func_show_eval "$cmd" 'exit $?' + output=$save_output + libobjs=$save_libobjs + skipped_export=false + else + # The command line is too long to execute in one step. + func_verbose "using reloadable object file for export list..." + skipped_export=: + # Break out early, otherwise skipped_export may be + # set to false by a later but shorter cmd. 
+ break + fi + done + IFS="$save_ifs" + if test -n "$export_symbols_regex" && test "X$skipped_export" != "X:"; then + func_show_eval '$EGREP -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"' + func_show_eval '$MV "${export_symbols}T" "$export_symbols"' + fi + fi + fi + + if test -n "$export_symbols" && test -n "$include_expsyms"; then + tmp_export_symbols="$export_symbols" + test -n "$orig_export_symbols" && tmp_export_symbols="$orig_export_symbols" + $opt_dry_run || eval '$ECHO "$include_expsyms" | $SP2NL >> "$tmp_export_symbols"' + fi + + if test "X$skipped_export" != "X:" && test -n "$orig_export_symbols"; then + # The given exports_symbols file has to be filtered, so filter it. + func_verbose "filter symbol list for \`$libname.la' to tag DATA exports" + # FIXME: $output_objdir/$libname.filter potentially contains lots of + # 's' commands which not all seds can handle. GNU sed should be fine + # though. Also, the filter scales superlinearly with the number of + # global variables. join(1) would be nice here, but unfortunately + # isn't a blessed tool. + $opt_dry_run || $SED -e '/[ ,]DATA/!d;s,\(.*\)\([ \,].*\),s|^\1$|\1\2|,' < $export_symbols > $output_objdir/$libname.filter + func_append delfiles " $export_symbols $output_objdir/$libname.filter" + export_symbols=$output_objdir/$libname.def + $opt_dry_run || $SED -f $output_objdir/$libname.filter < $orig_export_symbols > $export_symbols + fi + + tmp_deplibs= + for test_deplib in $deplibs; do + case " $convenience " in + *" $test_deplib "*) ;; + *) + func_append tmp_deplibs " $test_deplib" + ;; + esac + done + deplibs="$tmp_deplibs" + + if test -n "$convenience"; then + if test -n "$whole_archive_flag_spec" && + test "$compiler_needs_object" = yes && + test -z "$libobjs"; then + # extract the archives, so we have objects to list. + # TODO: could optimize this to just extract one archive. 
+ whole_archive_flag_spec= + fi + if test -n "$whole_archive_flag_spec"; then + save_libobjs=$libobjs + eval libobjs=\"\$libobjs $whole_archive_flag_spec\" + test "X$libobjs" = "X " && libobjs= + else + gentop="$output_objdir/${outputname}x" + func_append generated " $gentop" + + func_extract_archives $gentop $convenience + func_append libobjs " $func_extract_archives_result" + test "X$libobjs" = "X " && libobjs= + fi + fi + + if test "$thread_safe" = yes && test -n "$thread_safe_flag_spec"; then + eval flag=\"$thread_safe_flag_spec\" + func_append linker_flags " $flag" + fi + + # Make a backup of the uninstalled library when relinking + if test "$opt_mode" = relink; then + $opt_dry_run || eval '(cd $output_objdir && $RM ${realname}U && $MV $realname ${realname}U)' || exit $? + fi + + # Do each of the archive commands. + if test "$module" = yes && test -n "$module_cmds" ; then + if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then + eval test_cmds=\"$module_expsym_cmds\" + cmds=$module_expsym_cmds + else + eval test_cmds=\"$module_cmds\" + cmds=$module_cmds + fi + else + if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then + eval test_cmds=\"$archive_expsym_cmds\" + cmds=$archive_expsym_cmds + else + eval test_cmds=\"$archive_cmds\" + cmds=$archive_cmds + fi + fi + + if test "X$skipped_export" != "X:" && + func_len " $test_cmds" && + len=$func_len_result && + test "$len" -lt "$max_cmd_len" || test "$max_cmd_len" -le -1; then + : + else + # The command line is too long to link in one step, link piecewise + # or, if using GNU ld and skipped_export is not :, use a linker + # script. + + # Save the value of $output and $libobjs because we want to + # use them later. If we have whole_archive_flag_spec, we + # want to use save_libobjs as it was before + # whole_archive_flag_spec was expanded, because we can't + # assume the linker understands whole_archive_flag_spec. 
+ # This may have to be revisited, in case too many + # convenience libraries get linked in and end up exceeding + # the spec. + if test -z "$convenience" || test -z "$whole_archive_flag_spec"; then + save_libobjs=$libobjs + fi + save_output=$output + func_basename "$output" + output_la=$func_basename_result + + # Clear the reloadable object creation command queue and + # initialize k to one. + test_cmds= + concat_cmds= + objlist= + last_robj= + k=1 + + if test -n "$save_libobjs" && test "X$skipped_export" != "X:" && test "$with_gnu_ld" = yes; then + output=${output_objdir}/${output_la}.lnkscript + func_verbose "creating GNU ld script: $output" + echo 'INPUT (' > $output + for obj in $save_libobjs + do + func_to_tool_file "$obj" + $ECHO "$func_to_tool_file_result" >> $output + done + echo ')' >> $output + func_append delfiles " $output" + func_to_tool_file "$output" + output=$func_to_tool_file_result + elif test -n "$save_libobjs" && test "X$skipped_export" != "X:" && test "X$file_list_spec" != X; then + output=${output_objdir}/${output_la}.lnk + func_verbose "creating linker input file list: $output" + : > $output + set x $save_libobjs + shift + firstobj= + if test "$compiler_needs_object" = yes; then + firstobj="$1 " + shift + fi + for obj + do + func_to_tool_file "$obj" + $ECHO "$func_to_tool_file_result" >> $output + done + func_append delfiles " $output" + func_to_tool_file "$output" + output=$firstobj\"$file_list_spec$func_to_tool_file_result\" + else + if test -n "$save_libobjs"; then + func_verbose "creating reloadable object files..." + output=$output_objdir/$output_la-${k}.$objext + eval test_cmds=\"$reload_cmds\" + func_len " $test_cmds" + len0=$func_len_result + len=$len0 + + # Loop over the list of objects to be linked. 
+ for obj in $save_libobjs + do + func_len " $obj" + func_arith $len + $func_len_result + len=$func_arith_result + if test "X$objlist" = X || + test "$len" -lt "$max_cmd_len"; then + func_append objlist " $obj" + else + # The command $test_cmds is almost too long, add a + # command to the queue. + if test "$k" -eq 1 ; then + # The first file doesn't have a previous command to add. + reload_objs=$objlist + eval concat_cmds=\"$reload_cmds\" + else + # All subsequent reloadable object files will link in + # the last one created. + reload_objs="$objlist $last_robj" + eval concat_cmds=\"\$concat_cmds~$reload_cmds~\$RM $last_robj\" + fi + last_robj=$output_objdir/$output_la-${k}.$objext + func_arith $k + 1 + k=$func_arith_result + output=$output_objdir/$output_la-${k}.$objext + objlist=" $obj" + func_len " $last_robj" + func_arith $len0 + $func_len_result + len=$func_arith_result + fi + done + # Handle the remaining objects by creating one last + # reloadable object file. All subsequent reloadable object + # files will link in the last one created. + test -z "$concat_cmds" || concat_cmds=$concat_cmds~ + reload_objs="$objlist $last_robj" + eval concat_cmds=\"\${concat_cmds}$reload_cmds\" + if test -n "$last_robj"; then + eval concat_cmds=\"\${concat_cmds}~\$RM $last_robj\" + fi + func_append delfiles " $output" + + else + output= + fi + + if ${skipped_export-false}; then + func_verbose "generating symbol list for \`$libname.la'" + export_symbols="$output_objdir/$libname.exp" + $opt_dry_run || $RM $export_symbols + libobjs=$output + # Append the command to create the export file. + test -z "$concat_cmds" || concat_cmds=$concat_cmds~ + eval concat_cmds=\"\$concat_cmds$export_symbols_cmds\" + if test -n "$last_robj"; then + eval concat_cmds=\"\$concat_cmds~\$RM $last_robj\" + fi + fi + + test -n "$save_libobjs" && + func_verbose "creating a temporary reloadable object file: $output" + + # Loop through the commands generated above and execute them. 
+ save_ifs="$IFS"; IFS='~' + for cmd in $concat_cmds; do + IFS="$save_ifs" + $opt_silent || { + func_quote_for_expand "$cmd" + eval "func_echo $func_quote_for_expand_result" + } + $opt_dry_run || eval "$cmd" || { + lt_exit=$? + + # Restore the uninstalled library and exit + if test "$opt_mode" = relink; then + ( cd "$output_objdir" && \ + $RM "${realname}T" && \ + $MV "${realname}U" "$realname" ) + fi + + exit $lt_exit + } + done + IFS="$save_ifs" + + if test -n "$export_symbols_regex" && ${skipped_export-false}; then + func_show_eval '$EGREP -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"' + func_show_eval '$MV "${export_symbols}T" "$export_symbols"' + fi + fi + + if ${skipped_export-false}; then + if test -n "$export_symbols" && test -n "$include_expsyms"; then + tmp_export_symbols="$export_symbols" + test -n "$orig_export_symbols" && tmp_export_symbols="$orig_export_symbols" + $opt_dry_run || eval '$ECHO "$include_expsyms" | $SP2NL >> "$tmp_export_symbols"' + fi + + if test -n "$orig_export_symbols"; then + # The given exports_symbols file has to be filtered, so filter it. + func_verbose "filter symbol list for \`$libname.la' to tag DATA exports" + # FIXME: $output_objdir/$libname.filter potentially contains lots of + # 's' commands which not all seds can handle. GNU sed should be fine + # though. Also, the filter scales superlinearly with the number of + # global variables. join(1) would be nice here, but unfortunately + # isn't a blessed tool. + $opt_dry_run || $SED -e '/[ ,]DATA/!d;s,\(.*\)\([ \,].*\),s|^\1$|\1\2|,' < $export_symbols > $output_objdir/$libname.filter + func_append delfiles " $export_symbols $output_objdir/$libname.filter" + export_symbols=$output_objdir/$libname.def + $opt_dry_run || $SED -f $output_objdir/$libname.filter < $orig_export_symbols > $export_symbols + fi + fi + + libobjs=$output + # Restore the value of output. 
+ output=$save_output + + if test -n "$convenience" && test -n "$whole_archive_flag_spec"; then + eval libobjs=\"\$libobjs $whole_archive_flag_spec\" + test "X$libobjs" = "X " && libobjs= + fi + # Expand the library linking commands again to reset the + # value of $libobjs for piecewise linking. + + # Do each of the archive commands. + if test "$module" = yes && test -n "$module_cmds" ; then + if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then + cmds=$module_expsym_cmds + else + cmds=$module_cmds + fi + else + if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then + cmds=$archive_expsym_cmds + else + cmds=$archive_cmds + fi + fi + fi + + if test -n "$delfiles"; then + # Append the command to remove temporary files to $cmds. + eval cmds=\"\$cmds~\$RM $delfiles\" + fi + + # Add any objects from preloaded convenience libraries + if test -n "$dlprefiles"; then + gentop="$output_objdir/${outputname}x" + func_append generated " $gentop" + + func_extract_archives $gentop $dlprefiles + func_append libobjs " $func_extract_archives_result" + test "X$libobjs" = "X " && libobjs= + fi + + save_ifs="$IFS"; IFS='~' + for cmd in $cmds; do + IFS="$save_ifs" + eval cmd=\"$cmd\" + $opt_silent || { + func_quote_for_expand "$cmd" + eval "func_echo $func_quote_for_expand_result" + } + $opt_dry_run || eval "$cmd" || { + lt_exit=$? + + # Restore the uninstalled library and exit + if test "$opt_mode" = relink; then + ( cd "$output_objdir" && \ + $RM "${realname}T" && \ + $MV "${realname}U" "$realname" ) + fi + + exit $lt_exit + } + done + IFS="$save_ifs" + + # Restore the uninstalled library and exit + if test "$opt_mode" = relink; then + $opt_dry_run || eval '(cd $output_objdir && $RM ${realname}T && $MV $realname ${realname}T && $MV ${realname}U $realname)' || exit $? 
+ + if test -n "$convenience"; then + if test -z "$whole_archive_flag_spec"; then + func_show_eval '${RM}r "$gentop"' + fi + fi + + exit $EXIT_SUCCESS + fi + + # Create links to the real library. + for linkname in $linknames; do + if test "$realname" != "$linkname"; then + func_show_eval '(cd "$output_objdir" && $RM "$linkname" && $LN_S "$realname" "$linkname")' 'exit $?' + fi + done + + # If -module or -export-dynamic was specified, set the dlname. + if test "$module" = yes || test "$export_dynamic" = yes; then + # On all known operating systems, these are identical. + dlname="$soname" + fi + fi + ;; + + obj) + if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then + func_warning "\`-dlopen' is ignored for objects" + fi + + case " $deplibs" in + *\ -l* | *\ -L*) + func_warning "\`-l' and \`-L' are ignored for objects" ;; + esac + + test -n "$rpath" && \ + func_warning "\`-rpath' is ignored for objects" + + test -n "$xrpath" && \ + func_warning "\`-R' is ignored for objects" + + test -n "$vinfo" && \ + func_warning "\`-version-info' is ignored for objects" + + test -n "$release" && \ + func_warning "\`-release' is ignored for objects" + + case $output in + *.lo) + test -n "$objs$old_deplibs" && \ + func_fatal_error "cannot build library object \`$output' from non-libtool objects" + + libobj=$output + func_lo2o "$libobj" + obj=$func_lo2o_result + ;; + *) + libobj= + obj="$output" + ;; + esac + + # Delete the old objects. + $opt_dry_run || $RM $obj $libobj + + # Objects from convenience libraries. This assumes + # single-version convenience libraries. Whenever we create + # different ones for PIC/non-PIC, this we'll have to duplicate + # the extraction. + reload_conv_objs= + gentop= + # reload_cmds runs $LD directly, so let us get rid of + # -Wl from whole_archive_flag_spec and hope we can get by with + # turning comma into space.. 
+ wl= + + if test -n "$convenience"; then + if test -n "$whole_archive_flag_spec"; then + eval tmp_whole_archive_flags=\"$whole_archive_flag_spec\" + reload_conv_objs=$reload_objs\ `$ECHO "$tmp_whole_archive_flags" | $SED 's|,| |g'` + else + gentop="$output_objdir/${obj}x" + func_append generated " $gentop" + + func_extract_archives $gentop $convenience + reload_conv_objs="$reload_objs $func_extract_archives_result" + fi + fi + + # If we're not building shared, we need to use non_pic_objs + test "$build_libtool_libs" != yes && libobjs="$non_pic_objects" + + # Create the old-style object. + reload_objs="$objs$old_deplibs "`$ECHO "$libobjs" | $SP2NL | $SED "/\.${libext}$/d; /\.lib$/d; $lo2o" | $NL2SP`" $reload_conv_objs" ### testsuite: skip nested quoting test + + output="$obj" + func_execute_cmds "$reload_cmds" 'exit $?' + + # Exit if we aren't doing a library object file. + if test -z "$libobj"; then + if test -n "$gentop"; then + func_show_eval '${RM}r "$gentop"' + fi + + exit $EXIT_SUCCESS + fi + + if test "$build_libtool_libs" != yes; then + if test -n "$gentop"; then + func_show_eval '${RM}r "$gentop"' + fi + + # Create an invalid libtool object if no PIC, so that we don't + # accidentally link it into a program. + # $show "echo timestamp > $libobj" + # $opt_dry_run || eval "echo timestamp > $libobj" || exit $? + exit $EXIT_SUCCESS + fi + + if test -n "$pic_flag" || test "$pic_mode" != default; then + # Only do commands if we really have different PIC objects. + reload_objs="$libobjs $reload_conv_objs" + output="$libobj" + func_execute_cmds "$reload_cmds" 'exit $?' 
+ fi + + if test -n "$gentop"; then + func_show_eval '${RM}r "$gentop"' + fi + + exit $EXIT_SUCCESS + ;; + + prog) + case $host in + *cygwin*) func_stripname '' '.exe' "$output" + output=$func_stripname_result.exe;; + esac + test -n "$vinfo" && \ + func_warning "\`-version-info' is ignored for programs" + + test -n "$release" && \ + func_warning "\`-release' is ignored for programs" + + test "$preload" = yes \ + && test "$dlopen_support" = unknown \ + && test "$dlopen_self" = unknown \ + && test "$dlopen_self_static" = unknown && \ + func_warning "\`LT_INIT([dlopen])' not used. Assuming no dlopen support." + + case $host in + *-*-rhapsody* | *-*-darwin1.[012]) + # On Rhapsody replace the C library is the System framework + compile_deplibs=`$ECHO " $compile_deplibs" | $SED 's/ -lc / System.ltframework /'` + finalize_deplibs=`$ECHO " $finalize_deplibs" | $SED 's/ -lc / System.ltframework /'` + ;; + esac + + case $host in + *-*-darwin*) + # Don't allow lazy linking, it breaks C++ global constructors + # But is supposedly fixed on 10.4 or later (yay!). 
+ if test "$tagname" = CXX ; then + case ${MACOSX_DEPLOYMENT_TARGET-10.0} in + 10.[0123]) + func_append compile_command " ${wl}-bind_at_load" + func_append finalize_command " ${wl}-bind_at_load" + ;; + esac + fi + # Time to change all our "foo.ltframework" stuff back to "-framework foo" + compile_deplibs=`$ECHO " $compile_deplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + finalize_deplibs=`$ECHO " $finalize_deplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + ;; + esac + + + # move library search paths that coincide with paths to not yet + # installed libraries to the beginning of the library search list + new_libs= + for path in $notinst_path; do + case " $new_libs " in + *" -L$path/$objdir "*) ;; + *) + case " $compile_deplibs " in + *" -L$path/$objdir "*) + func_append new_libs " -L$path/$objdir" ;; + esac + ;; + esac + done + for deplib in $compile_deplibs; do + case $deplib in + -L*) + case " $new_libs " in + *" $deplib "*) ;; + *) func_append new_libs " $deplib" ;; + esac + ;; + *) func_append new_libs " $deplib" ;; + esac + done + compile_deplibs="$new_libs" + + + func_append compile_command " $compile_deplibs" + func_append finalize_command " $finalize_deplibs" + + if test -n "$rpath$xrpath"; then + # If the user specified any rpath flags, then add them. + for libdir in $rpath $xrpath; do + # This is the magic to use -rpath. + case "$finalize_rpath " in + *" $libdir "*) ;; + *) func_append finalize_rpath " $libdir" ;; + esac + done + fi + + # Now hardcode the library paths + rpath= + hardcode_libdirs= + for libdir in $compile_rpath $finalize_rpath; do + if test -n "$hardcode_libdir_flag_spec"; then + if test -n "$hardcode_libdir_separator"; then + if test -z "$hardcode_libdirs"; then + hardcode_libdirs="$libdir" + else + # Just accumulate the unique libdirs. 
+ case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in + *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) + ;; + *) + func_append hardcode_libdirs "$hardcode_libdir_separator$libdir" + ;; + esac + fi + else + eval flag=\"$hardcode_libdir_flag_spec\" + func_append rpath " $flag" + fi + elif test -n "$runpath_var"; then + case "$perm_rpath " in + *" $libdir "*) ;; + *) func_append perm_rpath " $libdir" ;; + esac + fi + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*) + testbindir=`${ECHO} "$libdir" | ${SED} -e 's*/lib$*/bin*'` + case :$dllsearchpath: in + *":$libdir:"*) ;; + ::) dllsearchpath=$libdir;; + *) func_append dllsearchpath ":$libdir";; + esac + case :$dllsearchpath: in + *":$testbindir:"*) ;; + ::) dllsearchpath=$testbindir;; + *) func_append dllsearchpath ":$testbindir";; + esac + ;; + esac + done + # Substitute the hardcoded libdirs into the rpath. + if test -n "$hardcode_libdir_separator" && + test -n "$hardcode_libdirs"; then + libdir="$hardcode_libdirs" + eval rpath=\" $hardcode_libdir_flag_spec\" + fi + compile_rpath="$rpath" + + rpath= + hardcode_libdirs= + for libdir in $finalize_rpath; do + if test -n "$hardcode_libdir_flag_spec"; then + if test -n "$hardcode_libdir_separator"; then + if test -z "$hardcode_libdirs"; then + hardcode_libdirs="$libdir" + else + # Just accumulate the unique libdirs. + case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in + *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) + ;; + *) + func_append hardcode_libdirs "$hardcode_libdir_separator$libdir" + ;; + esac + fi + else + eval flag=\"$hardcode_libdir_flag_spec\" + func_append rpath " $flag" + fi + elif test -n "$runpath_var"; then + case "$finalize_perm_rpath " in + *" $libdir "*) ;; + *) func_append finalize_perm_rpath " $libdir" ;; + esac + fi + done + # Substitute the hardcoded libdirs into the rpath. 
+ if test -n "$hardcode_libdir_separator" && + test -n "$hardcode_libdirs"; then + libdir="$hardcode_libdirs" + eval rpath=\" $hardcode_libdir_flag_spec\" + fi + finalize_rpath="$rpath" + + if test -n "$libobjs" && test "$build_old_libs" = yes; then + # Transform all the library objects into standard objects. + compile_command=`$ECHO "$compile_command" | $SP2NL | $SED "$lo2o" | $NL2SP` + finalize_command=`$ECHO "$finalize_command" | $SP2NL | $SED "$lo2o" | $NL2SP` + fi + + func_generate_dlsyms "$outputname" "@PROGRAM@" "no" + + # template prelinking step + if test -n "$prelink_cmds"; then + func_execute_cmds "$prelink_cmds" 'exit $?' + fi + + wrappers_required=yes + case $host in + *cegcc* | *mingw32ce*) + # Disable wrappers for cegcc and mingw32ce hosts, we are cross compiling anyway. + wrappers_required=no + ;; + *cygwin* | *mingw* ) + if test "$build_libtool_libs" != yes; then + wrappers_required=no + fi + ;; + *) + if test "$need_relink" = no || test "$build_libtool_libs" != yes; then + wrappers_required=no + fi + ;; + esac + if test "$wrappers_required" = no; then + # Replace the output file specification. + compile_command=`$ECHO "$compile_command" | $SED 's%@OUTPUT@%'"$output"'%g'` + link_command="$compile_command$compile_rpath" + + # We have no uninstalled library dependencies, so finalize right now. + exit_status=0 + func_show_eval "$link_command" 'exit_status=$?' + + if test -n "$postlink_cmds"; then + func_to_tool_file "$output" + postlink_cmds=`func_echo_all "$postlink_cmds" | $SED -e 's%@OUTPUT@%'"$output"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g'` + func_execute_cmds "$postlink_cmds" 'exit $?' + fi + + # Delete the generated files. 
+ if test -f "$output_objdir/${outputname}S.${objext}"; then + func_show_eval '$RM "$output_objdir/${outputname}S.${objext}"' + fi + + exit $exit_status + fi + + if test -n "$compile_shlibpath$finalize_shlibpath"; then + compile_command="$shlibpath_var=\"$compile_shlibpath$finalize_shlibpath\$$shlibpath_var\" $compile_command" + fi + if test -n "$finalize_shlibpath"; then + finalize_command="$shlibpath_var=\"$finalize_shlibpath\$$shlibpath_var\" $finalize_command" + fi + + compile_var= + finalize_var= + if test -n "$runpath_var"; then + if test -n "$perm_rpath"; then + # We should set the runpath_var. + rpath= + for dir in $perm_rpath; do + func_append rpath "$dir:" + done + compile_var="$runpath_var=\"$rpath\$$runpath_var\" " + fi + if test -n "$finalize_perm_rpath"; then + # We should set the runpath_var. + rpath= + for dir in $finalize_perm_rpath; do + func_append rpath "$dir:" + done + finalize_var="$runpath_var=\"$rpath\$$runpath_var\" " + fi + fi + + if test "$no_install" = yes; then + # We don't need to create a wrapper script. + link_command="$compile_var$compile_command$compile_rpath" + # Replace the output file specification. + link_command=`$ECHO "$link_command" | $SED 's%@OUTPUT@%'"$output"'%g'` + # Delete the old output file. + $opt_dry_run || $RM $output + # Link the executable and exit + func_show_eval "$link_command" 'exit $?' + + if test -n "$postlink_cmds"; then + func_to_tool_file "$output" + postlink_cmds=`func_echo_all "$postlink_cmds" | $SED -e 's%@OUTPUT@%'"$output"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g'` + func_execute_cmds "$postlink_cmds" 'exit $?' 
+ fi + + exit $EXIT_SUCCESS + fi + + if test "$hardcode_action" = relink; then + # Fast installation is not supported + link_command="$compile_var$compile_command$compile_rpath" + relink_command="$finalize_var$finalize_command$finalize_rpath" + + func_warning "this platform does not like uninstalled shared libraries" + func_warning "\`$output' will be relinked during installation" + else + if test "$fast_install" != no; then + link_command="$finalize_var$compile_command$finalize_rpath" + if test "$fast_install" = yes; then + relink_command=`$ECHO "$compile_var$compile_command$compile_rpath" | $SED 's%@OUTPUT@%\$progdir/\$file%g'` + else + # fast_install is set to needless + relink_command= + fi + else + link_command="$compile_var$compile_command$compile_rpath" + relink_command="$finalize_var$finalize_command$finalize_rpath" + fi + fi + + # Replace the output file specification. + link_command=`$ECHO "$link_command" | $SED 's%@OUTPUT@%'"$output_objdir/$outputname"'%g'` + + # Delete the old output files. + $opt_dry_run || $RM $output $output_objdir/$outputname $output_objdir/lt-$outputname + + func_show_eval "$link_command" 'exit $?' + + if test -n "$postlink_cmds"; then + func_to_tool_file "$output_objdir/$outputname" + postlink_cmds=`func_echo_all "$postlink_cmds" | $SED -e 's%@OUTPUT@%'"$output_objdir/$outputname"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g'` + func_execute_cmds "$postlink_cmds" 'exit $?' + fi + + # Now create the wrapper script. + func_verbose "creating $output" + + # Quote the relink command for shipping. 
+ if test -n "$relink_command"; then + # Preserve any variables that may affect compiler behavior + for var in $variables_saved_for_relink; do + if eval test -z \"\${$var+set}\"; then + relink_command="{ test -z \"\${$var+set}\" || $lt_unset $var || { $var=; export $var; }; }; $relink_command" + elif eval var_value=\$$var; test -z "$var_value"; then + relink_command="$var=; export $var; $relink_command" + else + func_quote_for_eval "$var_value" + relink_command="$var=$func_quote_for_eval_result; export $var; $relink_command" + fi + done + relink_command="(cd `pwd`; $relink_command)" + relink_command=`$ECHO "$relink_command" | $SED "$sed_quote_subst"` + fi + + # Only actually do things if not in dry run mode. + $opt_dry_run || { + # win32 will think the script is a binary if it has + # a .exe suffix, so we strip it off here. + case $output in + *.exe) func_stripname '' '.exe' "$output" + output=$func_stripname_result ;; + esac + # test for cygwin because mv fails w/o .exe extensions + case $host in + *cygwin*) + exeext=.exe + func_stripname '' '.exe' "$outputname" + outputname=$func_stripname_result ;; + *) exeext= ;; + esac + case $host in + *cygwin* | *mingw* ) + func_dirname_and_basename "$output" "" "." + output_name=$func_basename_result + output_path=$func_dirname_result + cwrappersource="$output_path/$objdir/lt-$output_name.c" + cwrapper="$output_path/$output_name.exe" + $RM $cwrappersource $cwrapper + trap "$RM $cwrappersource $cwrapper; exit $EXIT_FAILURE" 1 2 15 + + func_emit_cwrapperexe_src > $cwrappersource + + # The wrapper executable is built using the $host compiler, + # because it contains $host paths and files. If cross- + # compiling, it, like the target executable, must be + # executed on the $host or under an emulation environment. 
+ $opt_dry_run || { + $LTCC $LTCFLAGS -o $cwrapper $cwrappersource + $STRIP $cwrapper + } + + # Now, create the wrapper script for func_source use: + func_ltwrapper_scriptname $cwrapper + $RM $func_ltwrapper_scriptname_result + trap "$RM $func_ltwrapper_scriptname_result; exit $EXIT_FAILURE" 1 2 15 + $opt_dry_run || { + # note: this script will not be executed, so do not chmod. + if test "x$build" = "x$host" ; then + $cwrapper --lt-dump-script > $func_ltwrapper_scriptname_result + else + func_emit_wrapper no > $func_ltwrapper_scriptname_result + fi + } + ;; + * ) + $RM $output + trap "$RM $output; exit $EXIT_FAILURE" 1 2 15 + + func_emit_wrapper no > $output + chmod +x $output + ;; + esac + } + exit $EXIT_SUCCESS + ;; + esac + + # See if we need to build an old-fashioned archive. + for oldlib in $oldlibs; do + + if test "$build_libtool_libs" = convenience; then + oldobjs="$libobjs_save $symfileobj" + addlibs="$convenience" + build_libtool_libs=no + else + if test "$build_libtool_libs" = module; then + oldobjs="$libobjs_save" + build_libtool_libs=no + else + oldobjs="$old_deplibs $non_pic_objects" + if test "$preload" = yes && test -f "$symfileobj"; then + func_append oldobjs " $symfileobj" + fi + fi + addlibs="$old_convenience" + fi + + if test -n "$addlibs"; then + gentop="$output_objdir/${outputname}x" + func_append generated " $gentop" + + func_extract_archives $gentop $addlibs + func_append oldobjs " $func_extract_archives_result" + fi + + # Do each command in the archive commands. + if test -n "$old_archive_from_new_cmds" && test "$build_libtool_libs" = yes; then + cmds=$old_archive_from_new_cmds + else + + # Add any objects from preloaded convenience libraries + if test -n "$dlprefiles"; then + gentop="$output_objdir/${outputname}x" + func_append generated " $gentop" + + func_extract_archives $gentop $dlprefiles + func_append oldobjs " $func_extract_archives_result" + fi + + # POSIX demands no paths to be encoded in archives. 
We have + # to avoid creating archives with duplicate basenames if we + # might have to extract them afterwards, e.g., when creating a + # static archive out of a convenience library, or when linking + # the entirety of a libtool archive into another (currently + # not supported by libtool). + if (for obj in $oldobjs + do + func_basename "$obj" + $ECHO "$func_basename_result" + done | sort | sort -uc >/dev/null 2>&1); then + : + else + echo "copying selected object files to avoid basename conflicts..." + gentop="$output_objdir/${outputname}x" + func_append generated " $gentop" + func_mkdir_p "$gentop" + save_oldobjs=$oldobjs + oldobjs= + counter=1 + for obj in $save_oldobjs + do + func_basename "$obj" + objbase="$func_basename_result" + case " $oldobjs " in + " ") oldobjs=$obj ;; + *[\ /]"$objbase "*) + while :; do + # Make sure we don't pick an alternate name that also + # overlaps. + newobj=lt$counter-$objbase + func_arith $counter + 1 + counter=$func_arith_result + case " $oldobjs " in + *[\ /]"$newobj "*) ;; + *) if test ! -f "$gentop/$newobj"; then break; fi ;; + esac + done + func_show_eval "ln $obj $gentop/$newobj || cp $obj $gentop/$newobj" + func_append oldobjs " $gentop/$newobj" + ;; + *) func_append oldobjs " $obj" ;; + esac + done + fi + func_to_tool_file "$oldlib" func_convert_file_msys_to_w32 + tool_oldlib=$func_to_tool_file_result + eval cmds=\"$old_archive_cmds\" + + func_len " $cmds" + len=$func_len_result + if test "$len" -lt "$max_cmd_len" || test "$max_cmd_len" -le -1; then + cmds=$old_archive_cmds + elif test -n "$archiver_list_spec"; then + func_verbose "using command file archive linking..." 
+ for obj in $oldobjs + do + func_to_tool_file "$obj" + $ECHO "$func_to_tool_file_result" + done > $output_objdir/$libname.libcmd + func_to_tool_file "$output_objdir/$libname.libcmd" + oldobjs=" $archiver_list_spec$func_to_tool_file_result" + cmds=$old_archive_cmds + else + # the command line is too long to link in one step, link in parts + func_verbose "using piecewise archive linking..." + save_RANLIB=$RANLIB + RANLIB=: + objlist= + concat_cmds= + save_oldobjs=$oldobjs + oldobjs= + # Is there a better way of finding the last object in the list? + for obj in $save_oldobjs + do + last_oldobj=$obj + done + eval test_cmds=\"$old_archive_cmds\" + func_len " $test_cmds" + len0=$func_len_result + len=$len0 + for obj in $save_oldobjs + do + func_len " $obj" + func_arith $len + $func_len_result + len=$func_arith_result + func_append objlist " $obj" + if test "$len" -lt "$max_cmd_len"; then + : + else + # the above command should be used before it gets too long + oldobjs=$objlist + if test "$obj" = "$last_oldobj" ; then + RANLIB=$save_RANLIB + fi + test -z "$concat_cmds" || concat_cmds=$concat_cmds~ + eval concat_cmds=\"\${concat_cmds}$old_archive_cmds\" + objlist= + len=$len0 + fi + done + RANLIB=$save_RANLIB + oldobjs=$objlist + if test "X$oldobjs" = "X" ; then + eval cmds=\"\$concat_cmds\" + else + eval cmds=\"\$concat_cmds~\$old_archive_cmds\" + fi + fi + fi + func_execute_cmds "$cmds" 'exit $?' + done + + test -n "$generated" && \ + func_show_eval "${RM}r$generated" + + # Now create the libtool archive. 
+ case $output in + *.la) + old_library= + test "$build_old_libs" = yes && old_library="$libname.$libext" + func_verbose "creating $output" + + # Preserve any variables that may affect compiler behavior + for var in $variables_saved_for_relink; do + if eval test -z \"\${$var+set}\"; then + relink_command="{ test -z \"\${$var+set}\" || $lt_unset $var || { $var=; export $var; }; }; $relink_command" + elif eval var_value=\$$var; test -z "$var_value"; then + relink_command="$var=; export $var; $relink_command" + else + func_quote_for_eval "$var_value" + relink_command="$var=$func_quote_for_eval_result; export $var; $relink_command" + fi + done + # Quote the link command for shipping. + relink_command="(cd `pwd`; $SHELL $progpath $preserve_args --mode=relink $libtool_args @inst_prefix_dir@)" + relink_command=`$ECHO "$relink_command" | $SED "$sed_quote_subst"` + if test "$hardcode_automatic" = yes ; then + relink_command= + fi + + # Only create the output if not a dry run. + $opt_dry_run || { + for installed in no yes; do + if test "$installed" = yes; then + if test -z "$install_libdir"; then + break + fi + output="$output_objdir/$outputname"i + # Replace all uninstalled libtool libraries with the installed ones + newdependency_libs= + for deplib in $dependency_libs; do + case $deplib in + *.la) + func_basename "$deplib" + name="$func_basename_result" + func_resolve_sysroot "$deplib" + eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $func_resolve_sysroot_result` + test -z "$libdir" && \ + func_fatal_error "\`$deplib' is not a valid libtool archive" + func_append newdependency_libs " ${lt_sysroot:+=}$libdir/$name" + ;; + -L*) + func_stripname -L '' "$deplib" + func_replace_sysroot "$func_stripname_result" + func_append newdependency_libs " -L$func_replace_sysroot_result" + ;; + -R*) + func_stripname -R '' "$deplib" + func_replace_sysroot "$func_stripname_result" + func_append newdependency_libs " -R$func_replace_sysroot_result" + ;; + *) func_append newdependency_libs 
" $deplib" ;; + esac + done + dependency_libs="$newdependency_libs" + newdlfiles= + + for lib in $dlfiles; do + case $lib in + *.la) + func_basename "$lib" + name="$func_basename_result" + eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $lib` + test -z "$libdir" && \ + func_fatal_error "\`$lib' is not a valid libtool archive" + func_append newdlfiles " ${lt_sysroot:+=}$libdir/$name" + ;; + *) func_append newdlfiles " $lib" ;; + esac + done + dlfiles="$newdlfiles" + newdlprefiles= + for lib in $dlprefiles; do + case $lib in + *.la) + # Only pass preopened files to the pseudo-archive (for + # eventual linking with the app. that links it) if we + # didn't already link the preopened objects directly into + # the library: + func_basename "$lib" + name="$func_basename_result" + eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $lib` + test -z "$libdir" && \ + func_fatal_error "\`$lib' is not a valid libtool archive" + func_append newdlprefiles " ${lt_sysroot:+=}$libdir/$name" + ;; + esac + done + dlprefiles="$newdlprefiles" + else + newdlfiles= + for lib in $dlfiles; do + case $lib in + [\\/]* | [A-Za-z]:[\\/]*) abs="$lib" ;; + *) abs=`pwd`"/$lib" ;; + esac + func_append newdlfiles " $abs" + done + dlfiles="$newdlfiles" + newdlprefiles= + for lib in $dlprefiles; do + case $lib in + [\\/]* | [A-Za-z]:[\\/]*) abs="$lib" ;; + *) abs=`pwd`"/$lib" ;; + esac + func_append newdlprefiles " $abs" + done + dlprefiles="$newdlprefiles" + fi + $RM $output + # place dlname in correct position for cygwin + # In fact, it would be nice if we could use this code for all target + # systems that can't hard-code library paths into their executables + # and that have no shared library path variable independent of PATH, + # but it turns out we can't easily determine that from inspecting + # libtool variables, so we have to hard-code the OSs to which it + # applies here; at the moment, that means platforms that use the PE + # object format with DLL files. 
See the long comment at the top of + # tests/bindir.at for full details. + tdlname=$dlname + case $host,$output,$installed,$module,$dlname in + *cygwin*,*lai,yes,no,*.dll | *mingw*,*lai,yes,no,*.dll | *cegcc*,*lai,yes,no,*.dll) + # If a -bindir argument was supplied, place the dll there. + if test "x$bindir" != x ; + then + func_relative_path "$install_libdir" "$bindir" + tdlname=$func_relative_path_result$dlname + else + # Otherwise fall back on heuristic. + tdlname=../bin/$dlname + fi + ;; + esac + $ECHO > $output "\ +# $outputname - a libtool library file +# Generated by $PROGRAM (GNU $PACKAGE$TIMESTAMP) $VERSION +# +# Please DO NOT delete this file! +# It is necessary for linking the library. + +# The name that we can dlopen(3). +dlname='$tdlname' + +# Names of this library. +library_names='$library_names' + +# The name of the static archive. +old_library='$old_library' + +# Linker flags that can not go in dependency_libs. +inherited_linker_flags='$new_inherited_linker_flags' + +# Libraries that this one depends upon. +dependency_libs='$dependency_libs' + +# Names of additional weak libraries provided by this library +weak_library_names='$weak_libs' + +# Version information for $libname. +current=$current +age=$age +revision=$revision + +# Is this an already installed library? +installed=$installed + +# Should we warn about portability when linking against -modules? +shouldnotlink=$module + +# Files to dlopen/dlpreopen +dlopen='$dlfiles' +dlpreopen='$dlprefiles' + +# Directory that this library needs to be installed in: +libdir='$install_libdir'" + if test "$installed" = no && test "$need_relink" = yes; then + $ECHO >> $output "\ +relink_command=\"$relink_command\"" + fi + done + } + + # Do a symbolic link so that the libtool archive can be found in + # LD_LIBRARY_PATH before the program is installed. + func_show_eval '( cd "$output_objdir" && $RM "$outputname" && $LN_S "../$outputname" "$outputname" )' 'exit $?' 
+ ;; + esac + exit $EXIT_SUCCESS +} + +{ test "$opt_mode" = link || test "$opt_mode" = relink; } && + func_mode_link ${1+"$@"} + + +# func_mode_uninstall arg... +func_mode_uninstall () +{ + $opt_debug + RM="$nonopt" + files= + rmforce= + exit_status=0 + + # This variable tells wrapper scripts just to set variables rather + # than running their programs. + libtool_install_magic="$magic" + + for arg + do + case $arg in + -f) func_append RM " $arg"; rmforce=yes ;; + -*) func_append RM " $arg" ;; + *) func_append files " $arg" ;; + esac + done + + test -z "$RM" && \ + func_fatal_help "you must specify an RM program" + + rmdirs= + + for file in $files; do + func_dirname "$file" "" "." + dir="$func_dirname_result" + if test "X$dir" = X.; then + odir="$objdir" + else + odir="$dir/$objdir" + fi + func_basename "$file" + name="$func_basename_result" + test "$opt_mode" = uninstall && odir="$dir" + + # Remember odir for removal later, being careful to avoid duplicates + if test "$opt_mode" = clean; then + case " $rmdirs " in + *" $odir "*) ;; + *) func_append rmdirs " $odir" ;; + esac + fi + + # Don't error if the file doesn't exist and rm -f was used. + if { test -L "$file"; } >/dev/null 2>&1 || + { test -h "$file"; } >/dev/null 2>&1 || + test -f "$file"; then + : + elif test -d "$file"; then + exit_status=1 + continue + elif test "$rmforce" = yes; then + continue + fi + + rmfiles="$file" + + case $name in + *.la) + # Possibly a libtool archive, so verify it. + if func_lalib_p "$file"; then + func_source $dir/$name + + # Delete the libtool libraries and symlinks. 
+ for n in $library_names; do + func_append rmfiles " $odir/$n" + done + test -n "$old_library" && func_append rmfiles " $odir/$old_library" + + case "$opt_mode" in + clean) + case " $library_names " in + *" $dlname "*) ;; + *) test -n "$dlname" && func_append rmfiles " $odir/$dlname" ;; + esac + test -n "$libdir" && func_append rmfiles " $odir/$name $odir/${name}i" + ;; + uninstall) + if test -n "$library_names"; then + # Do each command in the postuninstall commands. + func_execute_cmds "$postuninstall_cmds" 'test "$rmforce" = yes || exit_status=1' + fi + + if test -n "$old_library"; then + # Do each command in the old_postuninstall commands. + func_execute_cmds "$old_postuninstall_cmds" 'test "$rmforce" = yes || exit_status=1' + fi + # FIXME: should reinstall the best remaining shared library. + ;; + esac + fi + ;; + + *.lo) + # Possibly a libtool object, so verify it. + if func_lalib_p "$file"; then + + # Read the .lo file + func_source $dir/$name + + # Add PIC object to the list of files to remove. + if test -n "$pic_object" && + test "$pic_object" != none; then + func_append rmfiles " $dir/$pic_object" + fi + + # Add non-PIC object to the list of files to remove. + if test -n "$non_pic_object" && + test "$non_pic_object" != none; then + func_append rmfiles " $dir/$non_pic_object" + fi + fi + ;; + + *) + if test "$opt_mode" = clean ; then + noexename=$name + case $file in + *.exe) + func_stripname '' '.exe' "$file" + file=$func_stripname_result + func_stripname '' '.exe' "$name" + noexename=$func_stripname_result + # $file with .exe has already been added to rmfiles, + # add $file without .exe + func_append rmfiles " $file" + ;; + esac + # Do a test to see if this is a libtool program. 
+ if func_ltwrapper_p "$file"; then + if func_ltwrapper_executable_p "$file"; then + func_ltwrapper_scriptname "$file" + relink_command= + func_source $func_ltwrapper_scriptname_result + func_append rmfiles " $func_ltwrapper_scriptname_result" + else + relink_command= + func_source $dir/$noexename + fi + + # note $name still contains .exe if it was in $file originally + # as does the version of $file that was added into $rmfiles + func_append rmfiles " $odir/$name $odir/${name}S.${objext}" + if test "$fast_install" = yes && test -n "$relink_command"; then + func_append rmfiles " $odir/lt-$name" + fi + if test "X$noexename" != "X$name" ; then + func_append rmfiles " $odir/lt-${noexename}.c" + fi + fi + fi + ;; + esac + func_show_eval "$RM $rmfiles" 'exit_status=1' + done + + # Try to remove the ${objdir}s in the directories where we deleted files + for dir in $rmdirs; do + if test -d "$dir"; then + func_show_eval "rmdir $dir >/dev/null 2>&1" + fi + done + + exit $exit_status +} + +{ test "$opt_mode" = uninstall || test "$opt_mode" = clean; } && + func_mode_uninstall ${1+"$@"} + +test -z "$opt_mode" && { + help="$generic_help" + func_fatal_help "you must specify a MODE" +} + +test -z "$exec_cmd" && \ + func_fatal_help "invalid operation mode \`$opt_mode'" + +if test -n "$exec_cmd"; then + eval exec "$exec_cmd" + exit $EXIT_FAILURE +fi + +exit $exit_status + + +# The TAGs below are defined such that we never get into a situation +# in which we disable both kinds of libraries. Given conflicting +# choices, we go for a static library, that is the most portable, +# since we can't tell whether shared libraries were disabled because +# the user asked for that or because the platform doesn't support +# them. This is particularly important on AIX, because we don't +# support having both static and shared libraries enabled at the same +# time on that platform, so we default to a shared-only configuration. 
+# If a disable-shared tag is given, we'll fallback to a static-only +# configuration. But we'll never go from static-only to shared-only. + +# ### BEGIN LIBTOOL TAG CONFIG: disable-shared +build_libtool_libs=no +build_old_libs=yes +# ### END LIBTOOL TAG CONFIG: disable-shared + +# ### BEGIN LIBTOOL TAG CONFIG: disable-static +build_old_libs=`case $build_libtool_libs in yes) echo no;; *) echo yes;; esac` +# ### END LIBTOOL TAG CONFIG: disable-static + +# Local Variables: +# mode:shell-script +# sh-indentation:2 +# End: +# vi:sw=2 + diff --git a/build/aux/missing b/build/aux/missing new file mode 100755 index 00000000..cdea5149 --- /dev/null +++ b/build/aux/missing @@ -0,0 +1,215 @@ +#! /bin/sh +# Common wrapper for a few potentially missing GNU programs. + +scriptversion=2012-06-26.16; # UTC + +# Copyright (C) 1996-2013 Free Software Foundation, Inc. +# Originally written by Fran,cois Pinard , 1996. + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. + +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that program. 
+ +if test $# -eq 0; then + echo 1>&2 "Try '$0 --help' for more information" + exit 1 +fi + +case $1 in + + --is-lightweight) + # Used by our autoconf macros to check whether the available missing + # script is modern enough. + exit 0 + ;; + + --run) + # Back-compat with the calling convention used by older automake. + shift + ;; + + -h|--h|--he|--hel|--help) + echo "\ +$0 [OPTION]... PROGRAM [ARGUMENT]... + +Run 'PROGRAM [ARGUMENT]...', returning a proper advice when this fails due +to PROGRAM being missing or too old. + +Options: + -h, --help display this help and exit + -v, --version output version information and exit + +Supported PROGRAM values: + aclocal autoconf autoheader autom4te automake makeinfo + bison yacc flex lex help2man + +Version suffixes to PROGRAM as well as the prefixes 'gnu-', 'gnu', and +'g' are ignored when checking the name. + +Send bug reports to ." + exit $? + ;; + + -v|--v|--ve|--ver|--vers|--versi|--versio|--version) + echo "missing $scriptversion (GNU Automake)" + exit $? + ;; + + -*) + echo 1>&2 "$0: unknown '$1' option" + echo 1>&2 "Try '$0 --help' for more information" + exit 1 + ;; + +esac + +# Run the given program, remember its exit status. +"$@"; st=$? + +# If it succeeded, we are done. +test $st -eq 0 && exit 0 + +# Also exit now if we it failed (or wasn't found), and '--version' was +# passed; such an option is passed most likely to detect whether the +# program is present and works. +case $2 in --version|--help) exit $st;; esac + +# Exit code 63 means version mismatch. This often happens when the user +# tries to use an ancient version of a tool on a file that requires a +# minimum version. +if test $st -eq 63; then + msg="probably too old" +elif test $st -eq 127; then + # Program was missing. + msg="missing on your system" +else + # Program was found and executed, but failed. Give up. 
+ exit $st +fi + +perl_URL=http://www.perl.org/ +flex_URL=http://flex.sourceforge.net/ +gnu_software_URL=http://www.gnu.org/software + +program_details () +{ + case $1 in + aclocal|automake) + echo "The '$1' program is part of the GNU Automake package:" + echo "<$gnu_software_URL/automake>" + echo "It also requires GNU Autoconf, GNU m4 and Perl in order to run:" + echo "<$gnu_software_URL/autoconf>" + echo "<$gnu_software_URL/m4/>" + echo "<$perl_URL>" + ;; + autoconf|autom4te|autoheader) + echo "The '$1' program is part of the GNU Autoconf package:" + echo "<$gnu_software_URL/autoconf/>" + echo "It also requires GNU m4 and Perl in order to run:" + echo "<$gnu_software_URL/m4/>" + echo "<$perl_URL>" + ;; + esac +} + +give_advice () +{ + # Normalize program name to check for. + normalized_program=`echo "$1" | sed ' + s/^gnu-//; t + s/^gnu//; t + s/^g//; t'` + + printf '%s\n' "'$1' is $msg." + + configure_deps="'configure.ac' or m4 files included by 'configure.ac'" + case $normalized_program in + autoconf*) + echo "You should only need it if you modified 'configure.ac'," + echo "or m4 files included by it." + program_details 'autoconf' + ;; + autoheader*) + echo "You should only need it if you modified 'acconfig.h' or" + echo "$configure_deps." + program_details 'autoheader' + ;; + automake*) + echo "You should only need it if you modified 'Makefile.am' or" + echo "$configure_deps." + program_details 'automake' + ;; + aclocal*) + echo "You should only need it if you modified 'acinclude.m4' or" + echo "$configure_deps." + program_details 'aclocal' + ;; + autom4te*) + echo "You might have modified some maintainer files that require" + echo "the 'automa4te' program to be rebuilt." + program_details 'autom4te' + ;; + bison*|yacc*) + echo "You should only need it if you modified a '.y' file." + echo "You may want to install the GNU Bison package:" + echo "<$gnu_software_URL/bison/>" + ;; + lex*|flex*) + echo "You should only need it if you modified a '.l' file." 
+ echo "You may want to install the Fast Lexical Analyzer package:" + echo "<$flex_URL>" + ;; + help2man*) + echo "You should only need it if you modified a dependency" \ + "of a man page." + echo "You may want to install the GNU Help2man package:" + echo "<$gnu_software_URL/help2man/>" + ;; + makeinfo*) + echo "You should only need it if you modified a '.texi' file, or" + echo "any other file indirectly affecting the aspect of the manual." + echo "You might want to install the Texinfo package:" + echo "<$gnu_software_URL/texinfo/>" + echo "The spurious makeinfo call might also be the consequence of" + echo "using a buggy 'make' (AIX, DU, IRIX), in which case you might" + echo "want to install GNU make:" + echo "<$gnu_software_URL/make/>" + ;; + *) + echo "You might have modified some files without having the proper" + echo "tools for further handling them. Check the 'README' file, it" + echo "often tells you about the needed prerequisites for installing" + echo "this package. You may also peek at any GNU archive site, in" + echo "case some other package contains this missing '$1' program." + ;; + esac +} + +give_advice "$1" | sed -e '1s/^/WARNING: /' \ + -e '2,$s/^/ /' >&2 + +# Propagate the correct exit status (expected to be 127 for a program +# not found, 63 for a program that failed due to version mismatch). +exit $st + +# Local variables: +# eval: (add-hook 'write-file-hooks 'time-stamp) +# time-stamp-start: "scriptversion=" +# time-stamp-format: "%:y-%02m-%02d.%02H" +# time-stamp-time-zone: "UTC" +# time-stamp-end: "; # UTC" +# End: diff --git a/build/aux/test-driver b/build/aux/test-driver new file mode 100755 index 00000000..32bf39e8 --- /dev/null +++ b/build/aux/test-driver @@ -0,0 +1,127 @@ +#! /bin/sh +# test-driver - basic testsuite driver script. + +scriptversion=2012-06-27.10; # UTC + +# Copyright (C) 2011-2013 Free Software Foundation, Inc. 
+# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that program. + +# This file is maintained in Automake, please report +# bugs to or send patches to +# . + +# Make unconditional expansion of undefined variables an error. This +# helps a lot in preventing typo-related bugs. +set -u + +usage_error () +{ + echo "$0: $*" >&2 + print_usage >&2 + exit 2 +} + +print_usage () +{ + cat <$log_file 2>&1 +estatus=$? +if test $enable_hard_errors = no && test $estatus -eq 99; then + estatus=1 +fi + +case $estatus:$expect_failure in + 0:yes) col=$red res=XPASS recheck=yes gcopy=yes;; + 0:*) col=$grn res=PASS recheck=no gcopy=no;; + 77:*) col=$blu res=SKIP recheck=no gcopy=yes;; + 99:*) col=$mgn res=ERROR recheck=yes gcopy=yes;; + *:yes) col=$lgn res=XFAIL recheck=no gcopy=yes;; + *:*) col=$red res=FAIL recheck=yes gcopy=yes;; +esac + +# Report outcome to console. +echo "${col}${res}${std}: $test_name" + +# Register the test result, and other relevant metadata. 
+echo ":test-result: $res" > $trs_file +echo ":global-test-result: $res" >> $trs_file +echo ":recheck: $recheck" >> $trs_file +echo ":copy-in-global-log: $gcopy" >> $trs_file + +# Local Variables: +# mode: shell-script +# sh-indentation: 2 +# eval: (add-hook 'write-file-hooks 'time-stamp) +# time-stamp-start: "scriptversion=" +# time-stamp-format: "%:y-%02m-%02d.%02H" +# time-stamp-time-zone: "UTC" +# time-stamp-end: "; # UTC" +# End: diff --git a/build/aux/ylwrap b/build/aux/ylwrap new file mode 100755 index 00000000..1c4d7761 --- /dev/null +++ b/build/aux/ylwrap @@ -0,0 +1,249 @@ +#! /bin/sh +# ylwrap - wrapper for lex/yacc invocations. + +scriptversion=2012-12-21.17; # UTC + +# Copyright (C) 1996-2013 Free Software Foundation, Inc. +# +# Written by Tom Tromey . +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that program. + +# This file is maintained in Automake, please report +# bugs to or send patches to +# . + +get_dirname () +{ + case $1 in + */*|*\\*) printf '%s\n' "$1" | sed -e 's|\([\\/]\)[^\\/]*$|\1|';; + # Otherwise, we want the empty string (not "."). + esac +} + +# guard FILE +# ---------- +# The CPP macro used to guard inclusion of FILE. 
+guard() +{ + printf '%s\n' "$1" \ + | sed \ + -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \ + -e 's/[^ABCDEFGHIJKLMNOPQRSTUVWXYZ]/_/g' \ + -e 's/__*/_/g' +} + +# quote_for_sed [STRING] +# ---------------------- +# Return STRING (or stdin) quoted to be used as a sed pattern. +quote_for_sed () +{ + case $# in + 0) cat;; + 1) printf '%s\n' "$1";; + esac \ + | sed -e 's|[][\\.*]|\\&|g' +} + +case "$1" in + '') + echo "$0: No files given. Try '$0 --help' for more information." 1>&2 + exit 1 + ;; + --basedir) + basedir=$2 + shift 2 + ;; + -h|--h*) + cat <<\EOF +Usage: ylwrap [--help|--version] INPUT [OUTPUT DESIRED]... -- PROGRAM [ARGS]... + +Wrapper for lex/yacc invocations, renaming files as desired. + + INPUT is the input file + OUTPUT is one file PROG generates + DESIRED is the file we actually want instead of OUTPUT + PROGRAM is program to run + ARGS are passed to PROG + +Any number of OUTPUT,DESIRED pairs may be used. + +Report bugs to . +EOF + exit $? + ;; + -v|--v*) + echo "ylwrap $scriptversion" + exit $? + ;; +esac + + +# The input. +input="$1" +shift +# We'll later need for a correct munging of "#line" directives. +input_sub_rx=`get_dirname "$input" | quote_for_sed` +case "$input" in + [\\/]* | ?:[\\/]*) + # Absolute path; do nothing. + ;; + *) + # Relative path. Make it absolute. + input="`pwd`/$input" + ;; +esac +input_rx=`get_dirname "$input" | quote_for_sed` + +# Since DOS filename conventions don't allow two dots, +# the DOS version of Bison writes out y_tab.c instead of y.tab.c +# and y_tab.h instead of y.tab.h. Test to see if this is the case. +y_tab_nodot=false +if test -f y_tab.c || test -f y_tab.h; then + y_tab_nodot=true +fi + +# The parser itself, the first file, is the destination of the .y.c +# rule in the Makefile. +parser=$1 + +# A sed program to s/FROM/TO/g for all the FROM/TO so that, for +# instance, we rename #include "y.tab.h" into #include "parse.h" +# during the conversion from y.tab.c to parse.c. 
+sed_fix_filenames= + +# Also rename header guards, as Bison 2.7 for instance uses its header +# guard in its implementation file. +sed_fix_header_guards= + +while test "$#" -ne 0; do + if test "$1" = "--"; then + shift + break + fi + from=$1 + # Handle y_tab.c and y_tab.h output by DOS + if $y_tab_nodot; then + case $from in + "y.tab.c") from=y_tab.c;; + "y.tab.h") from=y_tab.h;; + esac + fi + shift + to=$1 + shift + sed_fix_filenames="${sed_fix_filenames}s|"`quote_for_sed "$from"`"|$to|g;" + sed_fix_header_guards="${sed_fix_header_guards}s|"`guard "$from"`"|"`guard "$to"`"|g;" +done + +# The program to run. +prog="$1" +shift +# Make any relative path in $prog absolute. +case "$prog" in + [\\/]* | ?:[\\/]*) ;; + *[\\/]*) prog="`pwd`/$prog" ;; +esac + +# FIXME: add hostname here for parallel makes that run commands on +# other machines. But that might take us over the 14-char limit. +dirname=ylwrap$$ +do_exit="cd '`pwd`' && rm -rf $dirname > /dev/null 2>&1;"' (exit $ret); exit $ret' +trap "ret=129; $do_exit" 1 +trap "ret=130; $do_exit" 2 +trap "ret=141; $do_exit" 13 +trap "ret=143; $do_exit" 15 +mkdir $dirname || exit 1 + +cd $dirname + +case $# in + 0) "$prog" "$input" ;; + *) "$prog" "$@" "$input" ;; +esac +ret=$? + +if test $ret -eq 0; then + for from in * + do + to=`printf '%s\n' "$from" | sed "$sed_fix_filenames"` + if test -f "$from"; then + # If $2 is an absolute path name, then just use that, + # otherwise prepend '../'. + case $to in + [\\/]* | ?:[\\/]*) target=$to;; + *) target="../$to";; + esac + + # Do not overwrite unchanged header files to avoid useless + # recompilations. Always update the parser itself: it is the + # destination of the .y.c rule in the Makefile. Divert the + # output of all other files to a temporary file so we can + # compare them to existing versions. + if test $from != $parser; then + realtarget="$target" + target=tmp-`printf '%s\n' "$target" | sed 's|.*[\\/]||g'` + fi + + # Munge "#line" or "#" directives. 
Don't let the resulting + # debug information point at an absolute srcdir. Use the real + # output file name, not yy.lex.c for instance. Adjust the + # include guards too. + sed -e "/^#/!b" \ + -e "s|$input_rx|$input_sub_rx|" \ + -e "$sed_fix_filenames" \ + -e "$sed_fix_header_guards" \ + "$from" >"$target" || ret=$? + + # Check whether files must be updated. + if test "$from" != "$parser"; then + if test -f "$realtarget" && cmp -s "$realtarget" "$target"; then + echo "$to is unchanged" + rm -f "$target" + else + echo "updating $to" + mv -f "$target" "$realtarget" + fi + fi + else + # A missing file is only an error for the parser. This is a + # blatant hack to let us support using "yacc -d". If -d is not + # specified, don't fail when the header file is "missing". + if test "$from" = "$parser"; then + ret=1 + fi + fi + done +fi + +# Remove the directory. +cd .. +rm -rf $dirname + +exit $ret + +# Local Variables: +# mode: shell-script +# sh-indentation: 2 +# eval: (add-hook 'write-file-hooks 'time-stamp) +# time-stamp-start: "scriptversion=" +# time-stamp-format: "%:y-%02m-%02d.%02H" +# time-stamp-time-zone: "UTC" +# time-stamp-end: "; # UTC" +# End: diff --git a/build/ax_boost_base.m4 b/build/ax_boost_base.m4 new file mode 100644 index 00000000..54a2a1be --- /dev/null +++ b/build/ax_boost_base.m4 @@ -0,0 +1,258 @@ +# =========================================================================== +# http://www.gnu.org/software/autoconf-archive/ax_boost_base.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_BOOST_BASE([MINIMUM-VERSION], [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND]) +# +# DESCRIPTION +# +# Test for the Boost C++ libraries of a particular version (or newer) +# +# If no path to the installed boost library is given the macro searchs +# under /usr, /usr/local, /opt and /opt/local and evaluates the +# $BOOST_ROOT environment variable. Further documentation is available at +# . 
+# +# This macro calls: +# +# AC_SUBST(BOOST_CPPFLAGS) / AC_SUBST(BOOST_LDFLAGS) +# +# And sets: +# +# HAVE_BOOST +# +# LICENSE +# +# Copyright (c) 2008 Thomas Porschberg +# Copyright (c) 2009 Peter Adolphs +# +# Copying and distribution of this file, with or without modification, are +# permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. This file is offered as-is, without any +# warranty. + +#serial 20 + +AC_DEFUN([AX_BOOST_BASE], +[ +AC_ARG_WITH([boost], + [AS_HELP_STRING([--with-boost@<:@=ARG@:>@], + [use Boost library from a standard location (ARG=yes), + from the specified location (ARG=), + or disable it (ARG=no) + @<:@ARG=yes@:>@ ])], + [ + if test "$withval" = "no"; then + want_boost="no" + elif test "$withval" = "yes"; then + want_boost="yes" + ac_boost_path="" + else + want_boost="yes" + ac_boost_path="$withval" + fi + ], + [want_boost="yes"]) + + +AC_ARG_WITH([boost-libdir], + AS_HELP_STRING([--with-boost-libdir=LIB_DIR], + [Force given directory for boost libraries. 
Note that this will override library path detection, so use this parameter only if default library detection fails and you know exactly where your boost libraries are located.]), + [ + if test -d "$withval" + then + ac_boost_lib_path="$withval" + else + AC_MSG_ERROR(--with-boost-libdir expected directory name) + fi + ], + [ac_boost_lib_path=""] +) + +if test "x$want_boost" = "xyes"; then + boost_lib_version_req=ifelse([$1], ,1.20.0,$1) + boost_lib_version_req_shorten=`expr $boost_lib_version_req : '\([[0-9]]*\.[[0-9]]*\)'` + boost_lib_version_req_major=`expr $boost_lib_version_req : '\([[0-9]]*\)'` + boost_lib_version_req_minor=`expr $boost_lib_version_req : '[[0-9]]*\.\([[0-9]]*\)'` + boost_lib_version_req_sub_minor=`expr $boost_lib_version_req : '[[0-9]]*\.[[0-9]]*\.\([[0-9]]*\)'` + if test "x$boost_lib_version_req_sub_minor" = "x" ; then + boost_lib_version_req_sub_minor="0" + fi + WANT_BOOST_VERSION=`expr $boost_lib_version_req_major \* 100000 \+ $boost_lib_version_req_minor \* 100 \+ $boost_lib_version_req_sub_minor` + AC_MSG_CHECKING(for boostlib >= $boost_lib_version_req) + succeeded=no + + dnl On 64-bit systems check for system libraries in both lib64 and lib. + dnl The former is specified by FHS, but e.g. Debian does not adhere to + dnl this (as it rises problems for generic multi-arch support). + dnl The last entry in the list is chosen by default when no libraries + dnl are found, e.g. when only header-only libraries are installed! 
+ libsubdirs="lib" + ax_arch=`uname -m` + if test $ax_arch = x86_64 -o $ax_arch = ppc64 -o $ax_arch = s390x -o $ax_arch = sparc64; then + libsubdirs="lib64 lib lib64" + fi + + dnl first we check the system location for boost libraries + dnl this location ist chosen if boost libraries are installed with the --layout=system option + dnl or if you install boost with RPM + if test "$ac_boost_path" != ""; then + BOOST_CPPFLAGS="-I$ac_boost_path/include" + for ac_boost_path_tmp in $libsubdirs; do + if test -d "$ac_boost_path"/"$ac_boost_path_tmp" ; then + BOOST_LDFLAGS="-L$ac_boost_path/$ac_boost_path_tmp" + break + fi + done + elif test "$cross_compiling" != yes; then + for ac_boost_path_tmp in /usr /usr/local /opt /opt/local ; do + if test -d "$ac_boost_path_tmp/include/boost" && test -r "$ac_boost_path_tmp/include/boost"; then + for libsubdir in $libsubdirs ; do + if ls "$ac_boost_path_tmp/$libsubdir/libboost_"* >/dev/null 2>&1 ; then break; fi + done + BOOST_LDFLAGS="-L$ac_boost_path_tmp/$libsubdir" + BOOST_CPPFLAGS="-I$ac_boost_path_tmp/include" + break; + fi + done + fi + + dnl overwrite ld flags if we have required special directory with + dnl --with-boost-libdir parameter + if test "$ac_boost_lib_path" != ""; then + BOOST_LDFLAGS="-L$ac_boost_lib_path" + fi + + CPPFLAGS_SAVED="$CPPFLAGS" + CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS" + export CPPFLAGS + + LDFLAGS_SAVED="$LDFLAGS" + LDFLAGS="$LDFLAGS $BOOST_LDFLAGS" + export LDFLAGS + + AC_REQUIRE([AC_PROG_CXX]) + AC_LANG_PUSH(C++) + AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ + @%:@include + ]], [[ + #if BOOST_VERSION >= $WANT_BOOST_VERSION + // Everything is okay + #else + # error Boost version is too old + #endif + ]])],[ + AC_MSG_RESULT(yes) + succeeded=yes + found_system=yes + ],[ + ]) + AC_LANG_POP([C++]) + + + + dnl if we found no boost with system layout we search for boost libraries + dnl built and installed without the --layout=system option or for a staged(not installed) version + if test "x$succeeded" != "xyes"; 
then + _version=0 + if test "$ac_boost_path" != ""; then + if test -d "$ac_boost_path" && test -r "$ac_boost_path"; then + for i in `ls -d $ac_boost_path/include/boost-* 2>/dev/null`; do + _version_tmp=`echo $i | sed "s#$ac_boost_path##" | sed 's/\/include\/boost-//' | sed 's/_/./'` + V_CHECK=`expr $_version_tmp \> $_version` + if test "$V_CHECK" = "1" ; then + _version=$_version_tmp + fi + VERSION_UNDERSCORE=`echo $_version | sed 's/\./_/'` + BOOST_CPPFLAGS="-I$ac_boost_path/include/boost-$VERSION_UNDERSCORE" + done + fi + else + if test "$cross_compiling" != yes; then + for ac_boost_path in /usr /usr/local /opt /opt/local ; do + if test -d "$ac_boost_path" && test -r "$ac_boost_path"; then + for i in `ls -d $ac_boost_path/include/boost-* 2>/dev/null`; do + _version_tmp=`echo $i | sed "s#$ac_boost_path##" | sed 's/\/include\/boost-//' | sed 's/_/./'` + V_CHECK=`expr $_version_tmp \> $_version` + if test "$V_CHECK" = "1" ; then + _version=$_version_tmp + best_path=$ac_boost_path + fi + done + fi + done + + VERSION_UNDERSCORE=`echo $_version | sed 's/\./_/'` + BOOST_CPPFLAGS="-I$best_path/include/boost-$VERSION_UNDERSCORE" + if test "$ac_boost_lib_path" = ""; then + for libsubdir in $libsubdirs ; do + if ls "$best_path/$libsubdir/libboost_"* >/dev/null 2>&1 ; then break; fi + done + BOOST_LDFLAGS="-L$best_path/$libsubdir" + fi + fi + + if test "x$BOOST_ROOT" != "x"; then + for libsubdir in $libsubdirs ; do + if ls "$BOOST_ROOT/stage/$libsubdir/libboost_"* >/dev/null 2>&1 ; then break; fi + done + if test -d "$BOOST_ROOT" && test -r "$BOOST_ROOT" && test -d "$BOOST_ROOT/stage/$libsubdir" && test -r "$BOOST_ROOT/stage/$libsubdir"; then + version_dir=`expr //$BOOST_ROOT : '.*/\(.*\)'` + stage_version=`echo $version_dir | sed 's/boost_//' | sed 's/_/./g'` + stage_version_shorten=`expr $stage_version : '\([[0-9]]*\.[[0-9]]*\)'` + V_CHECK=`expr $stage_version_shorten \>\= $_version` + if test "$V_CHECK" = "1" -a "$ac_boost_lib_path" = "" ; then + AC_MSG_NOTICE(We will use 
a staged boost library from $BOOST_ROOT) + BOOST_CPPFLAGS="-I$BOOST_ROOT" + BOOST_LDFLAGS="-L$BOOST_ROOT/stage/$libsubdir" + fi + fi + fi + fi + + CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS" + export CPPFLAGS + LDFLAGS="$LDFLAGS $BOOST_LDFLAGS" + export LDFLAGS + + AC_LANG_PUSH(C++) + AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ + @%:@include + ]], [[ + #if BOOST_VERSION >= $WANT_BOOST_VERSION + // Everything is okay + #else + # error Boost version is too old + #endif + ]])],[ + AC_MSG_RESULT(yes) + succeeded=yes + found_system=yes + ],[ + ]) + AC_LANG_POP([C++]) + fi + + if test "$succeeded" != "yes" ; then + if test "$_version" = "0" ; then + AC_MSG_NOTICE([[We could not detect the boost libraries (version $boost_lib_version_req_shorten or higher). If you have a staged boost library (still not installed) please specify \$BOOST_ROOT in your environment and do not give a PATH to --with-boost option. If you are sure you have boost installed, then check your version number looking in . See http://randspringer.de/boost for more documentation.]]) + else + AC_MSG_NOTICE([Your boost libraries seems to old (version $_version).]) + fi + # execute ACTION-IF-NOT-FOUND (if present): + ifelse([$3], , :, [$3]) + else + AC_SUBST(BOOST_CPPFLAGS) + AC_SUBST(BOOST_LDFLAGS) + AC_DEFINE(HAVE_BOOST,,[define if the Boost library is available]) + # execute ACTION-IF-FOUND (if present): + ifelse([$2], , :, [$2]) + fi + + CPPFLAGS="$CPPFLAGS_SAVED" + LDFLAGS="$LDFLAGS_SAVED" +fi + +]) diff --git a/build/ax_check_openssl.m4 b/build/ax_check_openssl.m4 new file mode 100644 index 00000000..b490c91f --- /dev/null +++ b/build/ax_check_openssl.m4 @@ -0,0 +1,136 @@ +# =========================================================================== +# http://www.gnu.org/software/autoconf-archive/ax_check_openssl.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_CHECK_OPENSSL([action-if-found[, action-if-not-found]]) +# +# DESCRIPTION +# +# Look for OpenSSL 
in a number of default spots, or in a user-selected +# spot (via --with-openssl). Sets +# +# OPENSSL_INCLUDES to the include directives required +# OPENSSL_LIBS to the -l directives required +# OPENSSL_LDFLAGS to the -L or -R flags required +# +# and calls ACTION-IF-FOUND or ACTION-IF-NOT-FOUND appropriately +# +# This macro sets OPENSSL_INCLUDES such that source files should use the +# openssl/ directory in include directives: +# +# #include +# +# LICENSE +# +# Copyright (c) 2009,2010 Zmanda Inc. +# Copyright (c) 2009,2010 Dustin J. Mitchell +# +# Copying and distribution of this file, with or without modification, are +# permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. This file is offered as-is, without any +# warranty. + +#serial 8 + +AU_ALIAS([CHECK_SSL], [AX_CHECK_OPENSSL]) +AC_DEFUN([AX_CHECK_OPENSSL], [ + found=false + overriden_with_ssl=false + AC_ARG_WITH([openssl], + [AS_HELP_STRING([--with-openssl=DIR], + [root of the OpenSSL directory])], + [ + case "$withval" in + "" | y | ye | yes | n | no) + AC_MSG_ERROR([Invalid --with-openssl value]) + ;; + *) ssldirs="$withval" + overriden_with_ssl=true + ;; + esac + ], [ + # if pkg-config is installed and openssl has installed a .pc file, + # then use that information and don't search ssldirs + AC_PATH_PROG([PKG_CONFIG], [pkg-config]) + if test x"$PKG_CONFIG" != x""; then + OPENSSL_LDFLAGS=`$PKG_CONFIG openssl --libs-only-L 2>/dev/null` + if test $? = 0; then + OPENSSL_LIBS=`$PKG_CONFIG openssl --libs-only-l 2>/dev/null` + OPENSSL_INCLUDES=`$PKG_CONFIG openssl --cflags-only-I 2>/dev/null` + found=true + fi + fi + + # no such luck; use some default ssldirs + if ! $found; then + ssldirs="/usr/local/ssl /usr/lib/ssl /usr/ssl /usr/pkg /usr/sfw /usr/local /opt/local /usr" + fi + ] + ) + + + # note that we #include , so the OpenSSL headers have to be in + # an 'openssl' subdirectory + + if ! 
$found; then + OPENSSL_INCLUDES= + for ssldir in $ssldirs; do + AC_MSG_CHECKING([for openssl/ssl.h in $ssldir]) + if test -f "$ssldir/include/openssl/ssl.h"; then + OPENSSL_INCLUDES="-I$ssldir/include" + if test -d "$ssldir/lib64"; then + OPENSSL_LDFLAGS="-L$ssldir/lib64" + if test "$overriden_with_ssl"; then + TS_ADDTO(LIBTOOL_LINK_FLAGS, [-rpath ${ssldir}/lib64]) + fi + else + OPENSSL_LDFLAGS="-L$ssldir/lib" + if test "$overriden_with_ssl"; then + TS_ADDTO(LIBTOOL_LINK_FLAGS, [-rpath ${ssldir}/lib]) + fi + fi + OPENSSL_LIBS="-lssl -lcrypto" + found=true + AC_MSG_RESULT([yes]) + break + else + AC_MSG_RESULT([no]) + fi + done + + # if the file wasn't found, well, go ahead and try the link anyway -- maybe + # it will just work! + fi + + # try the preprocessor and linker with our new flags, + # being careful not to pollute the global LIBS, LDFLAGS, and CPPFLAGS + + AC_MSG_CHECKING([whether compiling and linking against OpenSSL works]) + echo "Trying link with OPENSSL_LDFLAGS=$OPENSSL_LDFLAGS;" \ + "OPENSSL_LIBS=$OPENSSL_LIBS; OPENSSL_INCLUDES=$OPENSSL_INCLUDES" >&AS_MESSAGE_LOG_FD + + save_LIBS="$LIBS" + save_LDFLAGS="$LDFLAGS" + save_CPPFLAGS="$CPPFLAGS" + LDFLAGS="$LDFLAGS $OPENSSL_LDFLAGS" + LIBS="$OPENSSL_LIBS $LIBS" + CPPFLAGS="$OPENSSL_INCLUDES $CPPFLAGS" + AC_LINK_IFELSE( + [AC_LANG_PROGRAM([#include ], [SSL_new(NULL)])], + [ + AC_MSG_RESULT([yes]) + $1 + ], [ + AC_MSG_RESULT([no]) + $2 + ]) + CPPFLAGS="$save_CPPFLAGS" + LDFLAGS="$save_LDFLAGS" + LIBS="$save_LIBS" + + AC_SUBST([OPENSSL_INCLUDES]) + AC_SUBST([OPENSSL_LIBS]) + AC_SUBST([OPENSSL_LDFLAGS]) +]) diff --git a/build/ax_compare_version.m4 b/build/ax_compare_version.m4 new file mode 100644 index 00000000..74dc0fdd --- /dev/null +++ b/build/ax_compare_version.m4 @@ -0,0 +1,177 @@ +# =========================================================================== +# http://www.gnu.org/software/autoconf-archive/ax_compare_version.html +# 
=========================================================================== +# +# SYNOPSIS +# +# AX_COMPARE_VERSION(VERSION_A, OP, VERSION_B, [ACTION-IF-TRUE], [ACTION-IF-FALSE]) +# +# DESCRIPTION +# +# This macro compares two version strings. Due to the various number of +# minor-version numbers that can exist, and the fact that string +# comparisons are not compatible with numeric comparisons, this is not +# necessarily trivial to do in a autoconf script. This macro makes doing +# these comparisons easy. +# +# The six basic comparisons are available, as well as checking equality +# limited to a certain number of minor-version levels. +# +# The operator OP determines what type of comparison to do, and can be one +# of: +# +# eq - equal (test A == B) +# ne - not equal (test A != B) +# le - less than or equal (test A <= B) +# ge - greater than or equal (test A >= B) +# lt - less than (test A < B) +# gt - greater than (test A > B) +# +# Additionally, the eq and ne operator can have a number after it to limit +# the test to that number of minor versions. +# +# eq0 - equal up to the length of the shorter version +# ne0 - not equal up to the length of the shorter version +# eqN - equal up to N sub-version levels +# neN - not equal up to N sub-version levels +# +# When the condition is true, shell commands ACTION-IF-TRUE are run, +# otherwise shell commands ACTION-IF-FALSE are run. The environment +# variable 'ax_compare_version' is always set to either 'true' or 'false' +# as well. +# +# Examples: +# +# AX_COMPARE_VERSION([3.15.7],[lt],[3.15.8]) +# AX_COMPARE_VERSION([3.15],[lt],[3.15.8]) +# +# would both be true. +# +# AX_COMPARE_VERSION([3.15.7],[eq],[3.15.8]) +# AX_COMPARE_VERSION([3.15],[gt],[3.15.8]) +# +# would both be false. +# +# AX_COMPARE_VERSION([3.15.7],[eq2],[3.15.8]) +# +# would be true because it is only comparing two minor versions. 
+# +# AX_COMPARE_VERSION([3.15.7],[eq0],[3.15]) +# +# would be true because it is only comparing the lesser number of minor +# versions of the two values. +# +# Note: The characters that separate the version numbers do not matter. An +# empty string is the same as version 0. OP is evaluated by autoconf, not +# configure, so must be a string, not a variable. +# +# The author would like to acknowledge Guido Draheim whose advice about +# the m4_case and m4_ifvaln functions make this macro only include the +# portions necessary to perform the specific comparison specified by the +# OP argument in the final configure script. +# +# LICENSE +# +# Copyright (c) 2008 Tim Toolan +# +# Copying and distribution of this file, with or without modification, are +# permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. This file is offered as-is, without any +# warranty. + +#serial 11 + +dnl ######################################################################### +AC_DEFUN([AX_COMPARE_VERSION], [ + AC_REQUIRE([AC_PROG_AWK]) + + # Used to indicate true or false condition + ax_compare_version=false + + # Convert the two version strings to be compared into a format that + # allows a simple string comparison. The end result is that a version + # string of the form 1.12.5-r617 will be converted to the form + # 0001001200050617. In other words, each number is zero padded to four + # digits, and non digits are removed. 
+ AS_VAR_PUSHDEF([A],[ax_compare_version_A]) + A=`echo "$1" | sed -e 's/\([[0-9]]*\)/Z\1Z/g' \ + -e 's/Z\([[0-9]]\)Z/Z0\1Z/g' \ + -e 's/Z\([[0-9]][[0-9]]\)Z/Z0\1Z/g' \ + -e 's/Z\([[0-9]][[0-9]][[0-9]]\)Z/Z0\1Z/g' \ + -e 's/[[^0-9]]//g'` + + AS_VAR_PUSHDEF([B],[ax_compare_version_B]) + B=`echo "$3" | sed -e 's/\([[0-9]]*\)/Z\1Z/g' \ + -e 's/Z\([[0-9]]\)Z/Z0\1Z/g' \ + -e 's/Z\([[0-9]][[0-9]]\)Z/Z0\1Z/g' \ + -e 's/Z\([[0-9]][[0-9]][[0-9]]\)Z/Z0\1Z/g' \ + -e 's/[[^0-9]]//g'` + + dnl # In the case of le, ge, lt, and gt, the strings are sorted as necessary + dnl # then the first line is used to determine if the condition is true. + dnl # The sed right after the echo is to remove any indented white space. + m4_case(m4_tolower($2), + [lt],[ + ax_compare_version=`echo "x$A +x$B" | sed 's/^ *//' | sort -r | sed "s/x${A}/false/;s/x${B}/true/;1q"` + ], + [gt],[ + ax_compare_version=`echo "x$A +x$B" | sed 's/^ *//' | sort | sed "s/x${A}/false/;s/x${B}/true/;1q"` + ], + [le],[ + ax_compare_version=`echo "x$A +x$B" | sed 's/^ *//' | sort | sed "s/x${A}/true/;s/x${B}/false/;1q"` + ], + [ge],[ + ax_compare_version=`echo "x$A +x$B" | sed 's/^ *//' | sort -r | sed "s/x${A}/true/;s/x${B}/false/;1q"` + ],[ + dnl Split the operator from the subversion count if present. + m4_bmatch(m4_substr($2,2), + [0],[ + # A count of zero means use the length of the shorter version. + # Determine the number of characters in A and B. + ax_compare_version_len_A=`echo "$A" | $AWK '{print(length)}'` + ax_compare_version_len_B=`echo "$B" | $AWK '{print(length)}'` + + # Set A to no more than B's length and B to no more than A's length. 
+ A=`echo "$A" | sed "s/\(.\{$ax_compare_version_len_B\}\).*/\1/"` + B=`echo "$B" | sed "s/\(.\{$ax_compare_version_len_A\}\).*/\1/"` + ], + [[0-9]+],[ + # A count greater than zero means use only that many subversions + A=`echo "$A" | sed "s/\(\([[0-9]]\{4\}\)\{m4_substr($2,2)\}\).*/\1/"` + B=`echo "$B" | sed "s/\(\([[0-9]]\{4\}\)\{m4_substr($2,2)\}\).*/\1/"` + ], + [.+],[ + AC_WARNING( + [illegal OP numeric parameter: $2]) + ],[]) + + # Pad zeros at end of numbers to make same length. + ax_compare_version_tmp_A="$A`echo $B | sed 's/./0/g'`" + B="$B`echo $A | sed 's/./0/g'`" + A="$ax_compare_version_tmp_A" + + # Check for equality or inequality as necessary. + m4_case(m4_tolower(m4_substr($2,0,2)), + [eq],[ + test "x$A" = "x$B" && ax_compare_version=true + ], + [ne],[ + test "x$A" != "x$B" && ax_compare_version=true + ],[ + AC_WARNING([illegal OP parameter: $2]) + ]) + ]) + + AS_VAR_POPDEF([A])dnl + AS_VAR_POPDEF([B])dnl + + dnl # Execute ACTION-IF-TRUE / ACTION-IF-FALSE. + if test "$ax_compare_version" = "true" ; then + m4_ifvaln([$4],[$4],[:])dnl + m4_ifvaln([$5],[else $5])dnl + fi +]) dnl AX_COMPARE_VERSION diff --git a/build/ax_compiler_vendor.m4 b/build/ax_compiler_vendor.m4 new file mode 100644 index 00000000..73e32ea9 --- /dev/null +++ b/build/ax_compiler_vendor.m4 @@ -0,0 +1,84 @@ +# =========================================================================== +# http://www.gnu.org/software/autoconf-archive/ax_compiler_vendor.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_COMPILER_VENDOR +# +# DESCRIPTION +# +# Determine the vendor of the C/C++ compiler, e.g., gnu, intel, ibm, sun, +# hp, borland, comeau, dec, cray, kai, lcc, metrowerks, sgi, microsoft, +# watcom, etc. The vendor is returned in the cache variable +# $ax_cv_c_compiler_vendor for C and $ax_cv_cxx_compiler_vendor for C++. +# +# LICENSE +# +# Copyright (c) 2008 Steven G. 
Johnson +# Copyright (c) 2008 Matteo Frigo +# +# This program is free software: you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the +# Free Software Foundation, either version 3 of the License, or (at your +# option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . +# +# As a special exception, the respective Autoconf Macro's copyright owner +# gives unlimited permission to copy, distribute and modify the configure +# scripts that are the output of Autoconf when processing the Macro. You +# need not follow the terms of the GNU General Public License when using +# or distributing such scripts, even though portions of the text of the +# Macro appear in them. The GNU General Public License (GPL) does govern +# all other use of the material that constitutes the Autoconf Macro. +# +# This special exception to the GPL applies to versions of the Autoconf +# Macro released by the Autoconf Archive. When you make and distribute a +# modified version of the Autoconf Macro, you may extend this special +# exception to the GPL to apply to your modified version as well. 
+ +#serial 11 + +AC_DEFUN([AX_COMPILER_VENDOR], +[AC_CACHE_CHECK([for _AC_LANG compiler vendor], ax_cv_[]_AC_LANG_ABBREV[]_compiler_vendor, + [# note: don't check for gcc first since some other compilers define __GNUC__ + vendors="intel: __ICC,__ECC,__INTEL_COMPILER + ibm: __xlc__,__xlC__,__IBMC__,__IBMCPP__ + pathscale: __PATHCC__,__PATHSCALE__ + clang: __clang__ + gnu: __GNUC__ + sun: __SUNPRO_C,__SUNPRO_CC + hp: __HP_cc,__HP_aCC + dec: __DECC,__DECCXX,__DECC_VER,__DECCXX_VER + borland: __BORLANDC__,__TURBOC__ + comeau: __COMO__ + cray: _CRAYC + kai: __KCC + lcc: __LCC__ + sgi: __sgi,sgi + microsoft: _MSC_VER + metrowerks: __MWERKS__ + watcom: __WATCOMC__ + portland: __PGI + unknown: UNKNOWN" + for ventest in $vendors; do + case $ventest in + *:) vendor=$ventest; continue ;; + *) vencpp="defined("`echo $ventest | sed 's/,/) || defined(/g'`")" ;; + esac + AC_COMPILE_IFELSE([AC_LANG_PROGRAM(,[ + #if !($vencpp) + thisisanerror; + #endif + ])], [break]) + done + ax_cv_[]_AC_LANG_ABBREV[]_compiler_vendor=`echo $vendor | cut -d: -f1` + ]) +]) diff --git a/build/ax_cxx_compile_stdcxx_11.m4 b/build/ax_cxx_compile_stdcxx_11.m4 new file mode 100644 index 00000000..e4ba5f55 --- /dev/null +++ b/build/ax_cxx_compile_stdcxx_11.m4 @@ -0,0 +1,116 @@ +# ============================================================================ +# http://www.gnu.org/software/autoconf-archive/ax_cxx_compile_stdcxx_11.html +# ============================================================================ +# +# SYNOPSIS +# +# AX_CXX_COMPILE_STDCXX_11([ext|noext],[action-if-success],[action-if-failure]) +# +# DESCRIPTION +# +# Check for baseline language coverage in the compiler for the C++11 +# standard; if necessary, add switches to CXXFLAGS to enable support. +# Errors out if no mode that supports C++11 baseline syntax can be found. +# The argument, if specified, indicates whether you insist on an extended +# mode (e.g. -std=gnu++11) or a strict conformance mode (e.g. -std=c++11). 
+# If neither is specified, you get whatever works, with preference for an +# extended mode. +# +# LICENSE +# +# Copyright (c) 2008 Benjamin Kosnik +# Copyright (c) 2012 Zack Weinberg +# +# Copying and distribution of this file, with or without modification, are +# permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. This file is offered as-is, without any +# warranty. + +#serial 1 + +m4_define([_AX_CXX_COMPILE_STDCXX_11_testbody], [ + template + struct check + { + static_assert(sizeof(int) <= sizeof(T), "not big enough"); + }; + + + struct in_class_initialization { + int i = 0; + }; + + typedef check> right_angle_brackets; + + int a; + decltype(a) b; + + typedef check check_type; + check_type c; + check_type&& cr = static_cast(c); + + void * null_pointer_keyword = nullptr; +]) + +AC_DEFUN([AX_CXX_COMPILE_STDCXX_11], [dnl + m4_if([$1], [], [], + [$1], [ext], [], + [$1], [noext], [], + [m4_fatal([invalid argument `$1' to AX_CXX_COMPILE_STDCXX_11])])dnl + AC_LANG_ASSERT([C++])dnl + ac_success=no + AC_CACHE_CHECK(whether $CXX supports C++11 features by default, + ax_cv_cxx_compile_cxx11, + [AC_COMPILE_IFELSE([AC_LANG_SOURCE([_AX_CXX_COMPILE_STDCXX_11_testbody])], + [ax_cv_cxx_compile_cxx11=yes], + [ax_cv_cxx_compile_cxx11=no])]) + if test x$ax_cv_cxx_compile_cxx11 = xyes; then + ac_success=yes + fi + + m4_if([$1], [noext], [], [dnl + if test x$ac_success = xno; then + for switch in -std=gnu++11 -std=gnu++0x; do + cachevar=AS_TR_SH([ax_cv_cxx_compile_cxx11_$switch]) + AC_CACHE_CHECK(whether $CXX supports C++11 features with $switch, + $cachevar, + [ac_save_CXXFLAGS="$CXXFLAGS" + CXXFLAGS="$CXXFLAGS $switch" + AC_COMPILE_IFELSE([AC_LANG_SOURCE([_AX_CXX_COMPILE_STDCXX_11_testbody])], + [eval $cachevar=yes], + [eval $cachevar=no]) + CXXFLAGS="$ac_save_CXXFLAGS"]) + if eval test x\$$cachevar = xyes; then + CXXFLAGS="$CXXFLAGS $switch" + ac_success=yes + break + fi + done + fi]) + + m4_if([$1], [ext], [], [dnl + if test 
x$ac_success = xno; then + for switch in -std=c++11 -std=c++0x; do + cachevar=AS_TR_SH([ax_cv_cxx_compile_cxx11_$switch]) + AC_CACHE_CHECK(whether $CXX supports C++11 features with $switch, + $cachevar, + [ac_save_CXXFLAGS="$CXXFLAGS" + CXXFLAGS="$CXXFLAGS $switch" + AC_COMPILE_IFELSE([AC_LANG_SOURCE([_AX_CXX_COMPILE_STDCXX_11_testbody])], + [eval $cachevar=yes], + [eval $cachevar=no]) + CXXFLAGS="$ac_save_CXXFLAGS"]) + if eval test x\$$cachevar = xyes; then + CXXFLAGS="$CXXFLAGS $switch" + ac_success=yes + break + fi + done + fi]) + + if test x$ac_success = xno ; then + m4_if([$3], [], [true], [$3]) + else + m4_if([$2], [], [true], [$2]) + fi +]) diff --git a/build/ax_lib_curl.m4 b/build/ax_lib_curl.m4 new file mode 100644 index 00000000..198e87ad --- /dev/null +++ b/build/ax_lib_curl.m4 @@ -0,0 +1,38 @@ +# =========================================================================== +# http://www.gnu.org/software/autoconf-archive/ax_lib_curl.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_LIB_CURL([VERSION],[ACTION-IF-SUCCESS],[ACTION-IF-FAILURE]) +# +# DESCRIPTION +# +# Checks for minimum curl library version VERSION. If successfull executes +# ACTION-IF-SUCCESS otherwise ACTION-IF-FAILURE. +# +# Defines CURL_LIBS and CURL_CFLAGS. +# +# A simple example: +# +# AX_LIB_CURL([7.19.4],,[ +# AC_MSG_ERROR([Your system lacks of libcurl >= 7.19.4]) +# ]) +# +# This macro is a rearranged version of AC_LIB_CURL from Akos Maroy. +# +# LICENSE +# +# Copyright (c) 2009 Francesco Salvestrini +# +# Copying and distribution of this file, with or without modification, are +# permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. This file is offered as-is, without any +# warranty. 
+ +#serial 7 + +AU_ALIAS([AC_CHECK_CURL], [AX_LIB_CURL]) +AC_DEFUN([AX_LIB_CURL], [ + AX_PATH_GENERIC([curl],[$1],['s/^libcurl\ \+//'],[$2],[$3]) +]) diff --git a/build/ax_lib_readline.m4 b/build/ax_lib_readline.m4 new file mode 100644 index 00000000..1af80b25 --- /dev/null +++ b/build/ax_lib_readline.m4 @@ -0,0 +1,110 @@ +# =========================================================================== +# http://www.gnu.org/software/autoconf-archive/ax_lib_readline.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_LIB_READLINE +# +# DESCRIPTION +# +# Searches for a readline compatible library. If found, defines +# `HAVE_LIBREADLINE'. If the found library has the `add_history' function, +# sets also `HAVE_READLINE_HISTORY'. Also checks for the locations of the +# necessary include files and sets `HAVE_READLINE_H' or +# `HAVE_READLINE_READLINE_H' and `HAVE_READLINE_HISTORY_H' or +# 'HAVE_HISTORY_H' if the corresponding include files exists. +# +# The libraries that may be readline compatible are `libedit', +# `libeditline' and `libreadline'. Sometimes we need to link a termcap +# library for readline to work, this macro tests these cases too by trying +# to link with `libtermcap', `libcurses' or `libncurses' before giving up. 
+# +# Here is an example of how to use the information provided by this macro +# to perform the necessary includes or declarations in a C file: +# +# #ifdef HAVE_LIBREADLINE +# # if defined(HAVE_READLINE_READLINE_H) +# # include +# # elif defined(HAVE_READLINE_H) +# # include +# # else /* !defined(HAVE_READLINE_H) */ +# extern char *readline (); +# # endif /* !defined(HAVE_READLINE_H) */ +# char *cmdline = NULL; +# #else /* !defined(HAVE_READLINE_READLINE_H) */ +# /* no readline */ +# #endif /* HAVE_LIBREADLINE */ +# +# #ifdef HAVE_READLINE_HISTORY +# # if defined(HAVE_READLINE_HISTORY_H) +# # include +# # elif defined(HAVE_HISTORY_H) +# # include +# # else /* !defined(HAVE_HISTORY_H) */ +# extern void add_history (); +# extern int write_history (); +# extern int read_history (); +# # endif /* defined(HAVE_READLINE_HISTORY_H) */ +# /* no history */ +# #endif /* HAVE_READLINE_HISTORY */ +# +# LICENSE +# +# Copyright (c) 2008 Ville Laurikari +# +# Copying and distribution of this file, with or without modification, are +# permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. This file is offered as-is, without any +# warranty. 
+ +#serial 6 + +AU_ALIAS([VL_LIB_READLINE], [AX_LIB_READLINE]) +AC_DEFUN([AX_LIB_READLINE], [ + AC_CACHE_CHECK([for a readline compatible library], + ax_cv_lib_readline, [ + ORIG_LIBS="$LIBS" + for readline_lib in edit editline; do + for termcap_lib in "" termcap curses ncurses; do + if test -z "$termcap_lib"; then + TRY_LIB="-l$readline_lib" + else + TRY_LIB="-l$readline_lib -l$termcap_lib" + fi + LIBS="$ORIG_LIBS $TRY_LIB" + AC_TRY_LINK_FUNC(readline, ax_cv_lib_readline="$TRY_LIB") + if test -n "$ax_cv_lib_readline"; then + break + fi + done + if test -n "$ax_cv_lib_readline"; then + break + fi + done + if test -z "$ax_cv_lib_readline"; then + ax_cv_lib_readline="no" + fi + LIBS="$ORIG_LIBS" + ]) + + if test "$ax_cv_lib_readline" != "no"; then + ORIG_LIBS="$LIBS" + LIBS="$LIBS $ax_cv_lib_readline" + AC_SUBST(LIBREADLINE, [$ax_cv_lib_readline]) + AC_DEFINE(HAVE_LIBREADLINE, 1, + [Define if you have a readline compatible library]) + AC_CHECK_HEADERS(readline.h readline/readline.h) + AC_CACHE_CHECK([whether readline supports history], + ax_cv_lib_readline_history, [ + ax_cv_lib_readline_history="no" + AC_TRY_LINK_FUNC(add_history, ax_cv_lib_readline_history="yes") + ]) + if test "$ax_cv_lib_readline_history" = "yes"; then + AC_DEFINE(HAVE_READLINE_HISTORY, 1, + [Define if your readline library has \`add_history']) + AC_CHECK_HEADERS(history.h readline/history.h) + fi + LIBS="$ORIG_LIBS" + fi +])dnl diff --git a/build/ax_path_generic.m4 b/build/ax_path_generic.m4 new file mode 100644 index 00000000..351360da --- /dev/null +++ b/build/ax_path_generic.m4 @@ -0,0 +1,164 @@ +# =========================================================================== +# http://www.gnu.org/software/autoconf-archive/ax_path_generic.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_PATH_GENERIC(LIBRARY,[MINIMUM-VERSION,[SED-EXPR-EXTRACTOR]],[ACTION-IF-FOUND],[ACTION-IF-NOT-FOUND],[CONFIG-SCRIPTS],[CFLAGS-ARG],[LIBS-ARG]) +# +# 
DESCRIPTION +# +# Runs the LIBRARY-config script and defines LIBRARY_CFLAGS and +# LIBRARY_LIBS unless the user had predefined them in the environment. +# +# The script must support `--cflags' and `--libs' args. If MINIMUM-VERSION +# is specified, the script must also support the `--version' arg. If the +# `--with-library-[exec-]prefix' arguments to ./configure are given, it +# must also support `--prefix' and `--exec-prefix'. Prefereable use +# CONFIG-SCRIPTS as config script, CFLAGS-ARG instead of `--cflags` and +# LIBS-ARG instead of `--libs`, if given. +# +# The SED-EXPR-EXTRACTOR parameter representes the expression used in sed +# to extract the version number. Use it if your 'foo-config --version' +# dumps something like 'Foo library v1.0.0 (alfa)' instead of '1.0.0'. +# +# The macro respects LIBRARY_CONFIG, LIBRARY_CFLAGS and LIBRARY_LIBS +# variables. If the first one is defined, it specifies the name of the +# config script to use. If the latter two are defined, the script is not +# ran at all and their values are used instead (if only one of them is +# defined, the empty value of the remaining one is still used). +# +# Example: +# +# AX_PATH_GENERIC(Foo, 1.0.0) +# +# would run `foo-config --version' and check that it is at least 1.0.0, if +# successful the following variables would be defined and substituted: +# +# FOO_CFLAGS to `foo-config --cflags` +# FOO_LIBS to `foo-config --libs` +# +# Example: +# +# AX_PATH_GENERIC([Bar],,,[ +# AC_MSG_ERROR([Cannot find Bar library]) +# ]) +# +# would check for bar-config program, defining and substituting the +# following variables: +# +# BAR_CFLAGS to `bar-config --cflags` +# BAR_LIBS to `bar-config --libs` +# +# Example: +# +# ./configure BAZ_LIBS=/usr/lib/libbaz.a +# +# would link with a static version of baz library even if `baz-config +# --libs` returns just "-lbaz" that would normally result in using the +# shared library. +# +# This macro is a rearranged version of AC_PATH_GENERIC from Angus Lees. 
+# +# LICENSE +# +# Copyright (c) 2009 Francesco Salvestrini +# +# Copying and distribution of this file, with or without modification, are +# permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. This file is offered as-is, without any +# warranty. + +#serial 11 + +AU_ALIAS([AC_PATH_GENERIC], [AX_PATH_GENERIC]) +AC_DEFUN([AX_PATH_GENERIC],[ + AC_REQUIRE([AC_PROG_SED]) + + dnl we're going to need uppercase and lowercase versions of the + dnl string `LIBRARY' + pushdef([UP], translit([$1], [a-z], [A-Z]))dnl + pushdef([DOWN], translit([$1], [A-Z], [a-z]))dnl + + AC_ARG_WITH(DOWN-prefix,[AS_HELP_STRING([--with-]DOWN[-prefix=PREFIX], [Prefix where $1 is installed (optional)])], + DOWN[]_config_prefix="$withval", DOWN[]_config_prefix="") + AC_ARG_WITH(DOWN-exec-prefix,[AS_HELP_STRING([--with-]DOWN[-exec-prefix=EPREFIX], [Exec prefix where $1 is installed (optional)])], + DOWN[]_config_exec_prefix="$withval", DOWN[]_config_exec_prefix="") + + AC_ARG_VAR(UP[]_CONFIG, [config script used for $1]) + AC_ARG_VAR(UP[]_CFLAGS, [CFLAGS used for $1]) + AC_ARG_VAR(UP[]_LIBS, [LIBS used for $1]) + + AS_IF([test x$UP[]_CFLAGS != x -o x$UP[]_LIBS != x],[ + dnl Don't run config script at all, use user-provided values instead. 
+ AC_SUBST(UP[]_CFLAGS) + AC_SUBST(UP[]_LIBS) + : + $4 + ],[ + AS_IF([test x$DOWN[]_config_exec_prefix != x],[ + DOWN[]_config_args="$DOWN[]_config_args --exec-prefix=$DOWN[]_config_exec_prefix" + AS_IF([test x${UP[]_CONFIG+set} != xset],[ + UP[]_CONFIG=$DOWN[]_config_exec_prefix/bin/DOWN-config + ]) + ]) + AS_IF([test x$DOWN[]_config_prefix != x],[ + DOWN[]_config_args="$DOWN[]_config_args --prefix=$DOWN[]_config_prefix" + AS_IF([test x${UP[]_CONFIG+set} != xset],[ + UP[]_CONFIG=$DOWN[]_config_prefix/bin/DOWN-config + ]) + ]) + + AC_PATH_PROGS(UP[]_CONFIG,[$6 DOWN-config],[no]) + AS_IF([test "$UP[]_CONFIG" == "no"],[ + : + $5 + ],[ + dnl Get the CFLAGS from LIBRARY-config script + AS_IF([test x"$7" == x],[ + UP[]_CFLAGS="`$UP[]_CONFIG $DOWN[]_config_args --cflags`" + ],[ + UP[]_CFLAGS="`$UP[]_CONFIG $DOWN[]_config_args $7`" + ]) + + dnl Get the LIBS from LIBRARY-config script + AS_IF([test x"$8" == x],[ + UP[]_LIBS="`$UP[]_CONFIG $DOWN[]_config_args --libs`" + ],[ + UP[]_LIBS="`$UP[]_CONFIG $DOWN[]_config_args $8`" + ]) + + AS_IF([test x"$2" != x],[ + dnl Check for provided library version + AS_IF([test x"$3" != x],[ + dnl Use provided sed expression + DOWN[]_version="`$UP[]_CONFIG $DOWN[]_config_args --version | $SED -e $3`" + ],[ + DOWN[]_version="`$UP[]_CONFIG $DOWN[]_config_args --version | $SED -e 's/^\ *\(.*\)\ *$/\1/'`" + ]) + + AC_MSG_CHECKING([for $1 ($DOWN[]_version) >= $2]) + AX_COMPARE_VERSION($DOWN[]_version,[ge],[$2],[ + AC_MSG_RESULT([yes]) + + AC_SUBST(UP[]_CFLAGS) + AC_SUBST(UP[]_LIBS) + : + $4 + ],[ + AC_MSG_RESULT([no]) + : + $5 + ]) + ],[ + AC_SUBST(UP[]_CFLAGS) + AC_SUBST(UP[]_LIBS) + : + $4 + ]) + ]) + ]) + + popdef([UP]) + popdef([DOWN]) +]) diff --git a/build/ax_perl_modules.m4 b/build/ax_perl_modules.m4 new file mode 100644 index 00000000..4d881d46 --- /dev/null +++ b/build/ax_perl_modules.m4 @@ -0,0 +1,51 @@ +# LICENSE +# +# Copyright (c) 2009 Dean Povey +# +# Copying and distribution of this file, with or without modification, are +# 
permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. This file is offered as-is, without any +# warranty. +# +# Use: AX_PROG_PERL_MODULES([modules ...], true, false ) +# +# This should only be used after the test for the Perl module in configure.ac. +# +#serial 7 + +AU_ALIAS([AC_PROG_PERL_MODULES], [AX_PROG_PERL_MODULES]) +AC_DEFUN([AX_PROG_PERL_MODULES],[dnl + +m4_define([ax_perl_modules]) +m4_foreach([ax_perl_module], m4_split(m4_normalize([$1])), + [ + m4_append([ax_perl_modules], + [']m4_bpatsubst(ax_perl_module,=,[ ])[' ]) + ]) + +if test "x$PERL" != x; then + ax_perl_modules_failed=0 + for ax_perl_module in ax_perl_modules; do + AC_MSG_CHECKING(for perl module $ax_perl_module) + + # Would be nice to log result here, but can't rely on autoconf internals + $PERL -e "use $ax_perl_module; exit" > /dev/null 2>&1 + if test $? -ne 0; then + AC_MSG_RESULT(no); + ax_perl_modules_failed=1 + else + AC_MSG_RESULT(ok); + fi + done + + # Run optional shell commands + if test "$ax_perl_modules_failed" = 0; then + : + $2 + else + : + $3 + fi +else + AC_MSG_WARN(could not find perl) +fi]) diff --git a/build/ax_with_curses.m4 b/build/ax_with_curses.m4 new file mode 100644 index 00000000..33a37ac9 --- /dev/null +++ b/build/ax_with_curses.m4 @@ -0,0 +1,518 @@ +# =========================================================================== +# http://www.gnu.org/software/autoconf-archive/ax_with_curses.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_WITH_CURSES +# +# DESCRIPTION +# +# This macro checks whether a SysV or X/Open-compatible Curses library is +# present, along with the associated header file. The NcursesW +# (wide-character) library is searched for first, followed by Ncurses, +# then the system-default plain Curses. The first library found is the +# one returned. 
+# +# The following options are understood: --with-ncursesw, --with-ncurses, +# --without-ncursesw, --without-ncurses. The "--with" options force the +# macro to use that particular library, terminating with an error if not +# found. The "--without" options simply skip the check for that library. +# The effect on the search pattern is: +# +# (no options) - NcursesW, Ncurses, Curses +# --with-ncurses --with-ncursesw - NcursesW only [*] +# --without-ncurses --with-ncursesw - NcursesW only [*] +# --with-ncursesw - NcursesW only [*] +# --with-ncurses --without-ncursesw - Ncurses only [*] +# --with-ncurses - NcursesW, Ncurses [**] +# --without-ncurses --without-ncursesw - Curses only +# --without-ncursesw - Ncurses, Curses +# --without-ncurses - NcursesW, Curses +# +# [*] If the library is not found, abort the configure script. +# +# [**] If the second library (Ncurses) is not found, abort configure. +# +# The following preprocessor symbols may be defined by this macro if the +# appropriate conditions are met: +# +# HAVE_CURSES - if any SysV or X/Open Curses library found +# HAVE_CURSES_ENHANCED - if library supports X/Open Enhanced functions +# HAVE_CURSES_COLOR - if library supports color (enhanced functions) +# HAVE_CURSES_OBSOLETE - if library supports certain obsolete features +# HAVE_NCURSESW - if NcursesW (wide char) library is to be used +# HAVE_NCURSES - if the Ncurses library is to be used +# +# HAVE_CURSES_H - if is present and should be used +# HAVE_NCURSESW_H - if should be used +# HAVE_NCURSES_H - if should be used +# HAVE_NCURSESW_CURSES_H - if should be used +# HAVE_NCURSES_CURSES_H - if should be used +# +# (These preprocessor symbols are discussed later in this document.) +# +# The following output variable is defined by this macro; it is precious +# and may be overridden on the ./configure command line: +# +# CURSES_LIB - library to add to xxx_LDADD +# +# The library listed in CURSES_LIB is NOT added to LIBS by default. 
You +# need to add CURSES_LIB to the appropriate xxx_LDADD line in your +# Makefile.am. For example: +# +# prog_LDADD = @CURSES_LIB@ +# +# If CURSES_LIB is set on the configure command line (such as by running +# "./configure CURSES_LIB=-lmycurses"), then the only header searched for +# is . The user may use the CPPFLAGS precious variable to +# override the standard #include search path. If the user needs to +# specify an alternative path for a library (such as for a non-standard +# NcurseW), the user should use the LDFLAGS variable. +# +# The following shell variables may be defined by this macro: +# +# ax_cv_curses - set to "yes" if any Curses library found +# ax_cv_curses_enhanced - set to "yes" if Enhanced functions present +# ax_cv_curses_color - set to "yes" if color functions present +# ax_cv_curses_obsolete - set to "yes" if obsolete features present +# +# ax_cv_ncursesw - set to "yes" if NcursesW library found +# ax_cv_ncurses - set to "yes" if Ncurses library found +# ax_cv_plaincurses - set to "yes" if plain Curses library found +# ax_cv_curses_which - set to "ncursesw", "ncurses", "plaincurses" or "no" +# +# These variables can be used in your configure.ac to determine the level +# of support you need from the Curses library. For example, if you must +# have either Ncurses or NcursesW, you could include: +# +# AX_WITH_CURSES +# if test "x$ax_cv_ncursesw" != xyes && test "x$ax_cv_ncurses" != xyes; then +# AX_MSG_ERROR([requires either NcursesW or Ncurses library]) +# fi +# +# If any Curses library will do (but one must be present and must support +# color), you could use: +# +# AX_WITH_CURSES +# if test "x$ax_cv_curses" != xyes || test "x$ax_cv_curses_color" != xyes; then +# AC_MSG_ERROR([requires an X/Open-compatible Curses library with color]) +# fi +# +# Certain preprocessor symbols and shell variables defined by this macro +# can be used to determine various features of the Curses library. 
In +# particular, HAVE_CURSES and ax_cv_curses are defined if the Curses +# library found conforms to the traditional SysV and/or X/Open Base Curses +# definition. Any working Curses library conforms to this level. +# +# HAVE_CURSES_ENHANCED and ax_cv_curses_enhanced are defined if the +# library supports the X/Open Enhanced Curses definition. In particular, +# the wide-character types attr_t, cchar_t and wint_t, the functions +# wattr_set() and wget_wch() and the macros WA_NORMAL and _XOPEN_CURSES +# are checked. The Ncurses library does NOT conform to this definition, +# although NcursesW does. +# +# HAVE_CURSES_COLOR and ax_cv_curses_color are defined if the library +# supports color functions and macros such as COLOR_PAIR, A_COLOR, +# COLOR_WHITE, COLOR_RED and init_pair(). These are NOT part of the +# X/Open Base Curses definition, but are part of the Enhanced set of +# functions. The Ncurses library DOES support these functions, as does +# NcursesW. +# +# HAVE_CURSES_OBSOLETE and ax_cv_curses_obsolete are defined if the +# library supports certain features present in SysV and BSD Curses but not +# defined in the X/Open definition. In particular, the functions +# getattrs(), getcurx() and getmaxx() are checked. +# +# To use the HAVE_xxx_H preprocessor symbols, insert the following into +# your system.h (or equivalent) header file: +# +# #if defined HAVE_NCURSESW_CURSES_H +# # include +# #elif defined HAVE_NCURSESW_H +# # include +# #elif defined HAVE_NCURSES_CURSES_H +# # include +# #elif defined HAVE_NCURSES_H +# # include +# #elif defined HAVE_CURSES_H +# # include +# #else +# # error "SysV or X/Open-compatible Curses header file required" +# #endif +# +# For previous users of this macro: you should not need to change anything +# in your configure.ac or Makefile.am, as the previous (serial 10) +# semantics are still valid. However, you should update your system.h (or +# equivalent) header file to the fragment shown above. 
You are encouraged +# also to make use of the extended functionality provided by this version +# of AX_WITH_CURSES, as well as in the additional macros +# AX_WITH_CURSES_PANEL, AX_WITH_CURSES_MENU and AX_WITH_CURSES_FORM. +# +# LICENSE +# +# Copyright (c) 2009 Mark Pulford +# Copyright (c) 2009 Damian Pietras +# Copyright (c) 2012 Reuben Thomas +# Copyright (c) 2011 John Zaitseff +# +# This program is free software: you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the +# Free Software Foundation, either version 3 of the License, or (at your +# option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . +# +# As a special exception, the respective Autoconf Macro's copyright owner +# gives unlimited permission to copy, distribute and modify the configure +# scripts that are the output of Autoconf when processing the Macro. You +# need not follow the terms of the GNU General Public License when using +# or distributing such scripts, even though portions of the text of the +# Macro appear in them. The GNU General Public License (GPL) does govern +# all other use of the material that constitutes the Autoconf Macro. +# +# This special exception to the GPL applies to versions of the Autoconf +# Macro released by the Autoconf Archive. When you make and distribute a +# modified version of the Autoconf Macro, you may extend this special +# exception to the GPL to apply to your modified version as well. + +#serial 13 + +AU_ALIAS([MP_WITH_CURSES], [AX_WITH_CURSES]) +AC_DEFUN([AX_WITH_CURSES], [ + AC_ARG_VAR([CURSES_LIB], [linker library for Curses, e.g. 
-lcurses]) + AC_ARG_WITH([ncurses], [AS_HELP_STRING([--with-ncurses], + [force the use of Ncurses or NcursesW])], + [], [with_ncurses=check]) + AC_ARG_WITH([ncursesw], [AS_HELP_STRING([--without-ncursesw], + [do not use NcursesW (wide character support)])], + [], [with_ncursesw=check]) + + ax_saved_LIBS=$LIBS + AS_IF([test "x$with_ncurses" = xyes || test "x$with_ncursesw" = xyes], + [ax_with_plaincurses=no], [ax_with_plaincurses=check]) + + ax_cv_curses_which=no + + # Test for NcursesW + + AS_IF([test "x$CURSES_LIB" = x && test "x$with_ncursesw" != xno], [ + LIBS="$ax_saved_LIBS -lncursesw" + + AC_CACHE_CHECK([for NcursesW wide-character library], [ax_cv_ncursesw], [ + AC_LINK_IFELSE([AC_LANG_CALL([], [initscr])], + [ax_cv_ncursesw=yes], [ax_cv_ncursesw=no]) + ]) + AS_IF([test "x$ax_cv_ncursesw" = xno && test "x$with_ncursesw" = xyes], [ + AC_MSG_ERROR([--with-ncursesw specified but could not find NcursesW library]) + ]) + + AS_IF([test "x$ax_cv_ncursesw" = xyes], [ + ax_cv_curses=yes + ax_cv_curses_which=ncursesw + CURSES_LIB="-lncursesw" + AC_DEFINE([HAVE_NCURSESW], [1], [Define to 1 if the NcursesW library is present]) + AC_DEFINE([HAVE_CURSES], [1], [Define to 1 if a SysV or X/Open compatible Curses library is present]) + + AC_CACHE_CHECK([for working ncursesw/curses.h], [ax_cv_header_ncursesw_curses_h], [ + AC_LINK_IFELSE([AC_LANG_PROGRAM([[ + @%:@define _XOPEN_SOURCE_EXTENDED 1 + @%:@include + ]], [[ + chtype a = A_BOLD; + int b = KEY_LEFT; + chtype c = COLOR_PAIR(1) & A_COLOR; + attr_t d = WA_NORMAL; + cchar_t e; + wint_t f; + int g = getattrs(stdscr); + int h = getcurx(stdscr) + getmaxx(stdscr); + initscr(); + init_pair(1, COLOR_WHITE, COLOR_RED); + wattr_set(stdscr, d, 0, NULL); + wget_wch(stdscr, &f); + ]])], + [ax_cv_header_ncursesw_curses_h=yes], + [ax_cv_header_ncursesw_curses_h=no]) + ]) + AS_IF([test "x$ax_cv_header_ncursesw_curses_h" = xyes], [ + ax_cv_curses_enhanced=yes + ax_cv_curses_color=yes + ax_cv_curses_obsolete=yes + 
AC_DEFINE([HAVE_CURSES_ENHANCED], [1], [Define to 1 if library supports X/Open Enhanced functions]) + AC_DEFINE([HAVE_CURSES_COLOR], [1], [Define to 1 if library supports color (enhanced functions)]) + AC_DEFINE([HAVE_CURSES_OBSOLETE], [1], [Define to 1 if library supports certain obsolete features]) + AC_DEFINE([HAVE_NCURSESW_CURSES_H], [1], [Define to 1 if is present]) + ]) + + AC_CACHE_CHECK([for working ncursesw.h], [ax_cv_header_ncursesw_h], [ + AC_LINK_IFELSE([AC_LANG_PROGRAM([[ + @%:@define _XOPEN_SOURCE_EXTENDED 1 + @%:@include + ]], [[ + chtype a = A_BOLD; + int b = KEY_LEFT; + chtype c = COLOR_PAIR(1) & A_COLOR; + attr_t d = WA_NORMAL; + cchar_t e; + wint_t f; + int g = getattrs(stdscr); + int h = getcurx(stdscr) + getmaxx(stdscr); + initscr(); + init_pair(1, COLOR_WHITE, COLOR_RED); + wattr_set(stdscr, d, 0, NULL); + wget_wch(stdscr, &f); + ]])], + [ax_cv_header_ncursesw_h=yes], + [ax_cv_header_ncursesw_h=no]) + ]) + AS_IF([test "x$ax_cv_header_ncursesw_h" = xyes], [ + ax_cv_curses_enhanced=yes + ax_cv_curses_color=yes + ax_cv_curses_obsolete=yes + AC_DEFINE([HAVE_CURSES_ENHANCED], [1], [Define to 1 if library supports X/Open Enhanced functions]) + AC_DEFINE([HAVE_CURSES_COLOR], [1], [Define to 1 if library supports color (enhanced functions)]) + AC_DEFINE([HAVE_CURSES_OBSOLETE], [1], [Define to 1 if library supports certain obsolete features]) + AC_DEFINE([HAVE_NCURSESW_H], [1], [Define to 1 if is present]) + ]) + + AC_CACHE_CHECK([for working ncurses.h], [ax_cv_header_ncurses_h_with_ncursesw], [ + AC_LINK_IFELSE([AC_LANG_PROGRAM([[ + @%:@define _XOPEN_SOURCE_EXTENDED 1 + @%:@include + ]], [[ + chtype a = A_BOLD; + int b = KEY_LEFT; + chtype c = COLOR_PAIR(1) & A_COLOR; + attr_t d = WA_NORMAL; + cchar_t e; + wint_t f; + int g = getattrs(stdscr); + int h = getcurx(stdscr) + getmaxx(stdscr); + initscr(); + init_pair(1, COLOR_WHITE, COLOR_RED); + wattr_set(stdscr, d, 0, NULL); + wget_wch(stdscr, &f); + ]])], + [ax_cv_header_ncurses_h_with_ncursesw=yes], + 
[ax_cv_header_ncurses_h_with_ncursesw=no]) + ]) + AS_IF([test "x$ax_cv_header_ncurses_h_with_ncursesw" = xyes], [ + ax_cv_curses_enhanced=yes + ax_cv_curses_color=yes + ax_cv_curses_obsolete=yes + AC_DEFINE([HAVE_CURSES_ENHANCED], [1], [Define to 1 if library supports X/Open Enhanced functions]) + AC_DEFINE([HAVE_CURSES_COLOR], [1], [Define to 1 if library supports color (enhanced functions)]) + AC_DEFINE([HAVE_CURSES_OBSOLETE], [1], [Define to 1 if library supports certain obsolete features]) + AC_DEFINE([HAVE_NCURSES_H], [1], [Define to 1 if is present]) + ]) + + AS_IF([test "x$ax_cv_header_ncursesw_curses_h" = xno && test "x$ax_cv_header_ncursesw_h" = xno && test "x$ax_cv_header_ncurses_h_with_ncursesw" = xno], [ + AC_MSG_WARN([could not find a working ncursesw/curses.h, ncursesw.h or ncurses.h]) + ]) + ]) + ]) + + # Test for Ncurses + + AS_IF([test "x$CURSES_LIB" = x && test "x$with_ncurses" != xno && test "x$ax_cv_curses_which" = xno], [ + LIBS="$ax_saved_LIBS -lncurses" + + AC_CACHE_CHECK([for Ncurses library], [ax_cv_ncurses], [ + AC_LINK_IFELSE([AC_LANG_CALL([], [initscr])], + [ax_cv_ncurses=yes], [ax_cv_ncurses=no]) + ]) + AS_IF([test "x$ax_cv_ncurses" = xno && test "x$with_ncurses" = xyes], [ + AC_MSG_ERROR([--with-ncurses specified but could not find Ncurses library]) + ]) + + AS_IF([test "x$ax_cv_ncurses" = xyes], [ + ax_cv_curses=yes + ax_cv_curses_which=ncurses + CURSES_LIB="-lncurses" + AC_DEFINE([HAVE_NCURSES], [1], [Define to 1 if the Ncurses library is present]) + AC_DEFINE([HAVE_CURSES], [1], [Define to 1 if a SysV or X/Open compatible Curses library is present]) + + AC_CACHE_CHECK([for working ncurses/curses.h], [ax_cv_header_ncurses_curses_h], [ + AC_LINK_IFELSE([AC_LANG_PROGRAM([[ + @%:@include + ]], [[ + chtype a = A_BOLD; + int b = KEY_LEFT; + chtype c = COLOR_PAIR(1) & A_COLOR; + int g = getattrs(stdscr); + int h = getcurx(stdscr) + getmaxx(stdscr); + initscr(); + init_pair(1, COLOR_WHITE, COLOR_RED); + ]])], + 
[ax_cv_header_ncurses_curses_h=yes], + [ax_cv_header_ncurses_curses_h=no]) + ]) + AS_IF([test "x$ax_cv_header_ncurses_curses_h" = xyes], [ + ax_cv_curses_color=yes + ax_cv_curses_obsolete=yes + AC_DEFINE([HAVE_CURSES_COLOR], [1], [Define to 1 if library supports color (enhanced functions)]) + AC_DEFINE([HAVE_CURSES_OBSOLETE], [1], [Define to 1 if library supports certain obsolete features]) + AC_DEFINE([HAVE_NCURSES_CURSES_H], [1], [Define to 1 if is present]) + ]) + + AC_CACHE_CHECK([for working ncurses.h], [ax_cv_header_ncurses_h], [ + AC_LINK_IFELSE([AC_LANG_PROGRAM([[ + @%:@include + ]], [[ + chtype a = A_BOLD; + int b = KEY_LEFT; + chtype c = COLOR_PAIR(1) & A_COLOR; + int g = getattrs(stdscr); + int h = getcurx(stdscr) + getmaxx(stdscr); + initscr(); + init_pair(1, COLOR_WHITE, COLOR_RED); + ]])], + [ax_cv_header_ncurses_h=yes], + [ax_cv_header_ncurses_h=no]) + ]) + AS_IF([test "x$ax_cv_header_ncurses_h" = xyes], [ + ax_cv_curses_color=yes + ax_cv_curses_obsolete=yes + AC_DEFINE([HAVE_CURSES_COLOR], [1], [Define to 1 if library supports color (enhanced functions)]) + AC_DEFINE([HAVE_CURSES_OBSOLETE], [1], [Define to 1 if library supports certain obsolete features]) + AC_DEFINE([HAVE_NCURSES_H], [1], [Define to 1 if is present]) + ]) + + AS_IF([test "x$ax_cv_header_ncurses_curses_h" = xno && test "x$ax_cv_header_ncurses_h" = xno], [ + AC_MSG_WARN([could not find a working ncurses/curses.h or ncurses.h]) + ]) + ]) + ]) + + # Test for plain Curses (or if CURSES_LIB was set by user) + + AS_IF([test "x$with_plaincurses" != xno && test "x$ax_cv_curses_which" = xno], [ + AS_IF([test "x$CURSES_LIB" != x], [ + LIBS="$ax_saved_LIBS $CURSES_LIB" + ], [ + LIBS="$ax_saved_LIBS -lcurses" + ]) + + AC_CACHE_CHECK([for Curses library], [ax_cv_plaincurses], [ + AC_LINK_IFELSE([AC_LANG_CALL([], [initscr])], + [ax_cv_plaincurses=yes], [ax_cv_plaincurses=no]) + ]) + + AS_IF([test "x$ax_cv_plaincurses" = xyes], [ + ax_cv_curses=yes + ax_cv_curses_which=plaincurses + AS_IF([test 
"x$CURSES_LIB" = x], [ + CURSES_LIB="-lcurses" + ]) + AC_DEFINE([HAVE_CURSES], [1], [Define to 1 if a SysV or X/Open compatible Curses library is present]) + + # Check for base conformance (and header file) + + AC_CACHE_CHECK([for working curses.h], [ax_cv_header_curses_h], [ + AC_LINK_IFELSE([AC_LANG_PROGRAM([[ + @%:@include + ]], [[ + chtype a = A_BOLD; + int b = KEY_LEFT; + initscr(); + ]])], + [ax_cv_header_curses_h=yes], + [ax_cv_header_curses_h=no]) + ]) + AS_IF([test "x$ax_cv_header_curses_h" = xyes], [ + AC_DEFINE([HAVE_CURSES_H], [1], [Define to 1 if is present]) + + # Check for X/Open Enhanced conformance + + AC_CACHE_CHECK([for X/Open Enhanced Curses conformance], [ax_cv_plaincurses_enhanced], [ + AC_LINK_IFELSE([AC_LANG_PROGRAM([[ + @%:@define _XOPEN_SOURCE_EXTENDED 1 + @%:@include + @%:@ifndef _XOPEN_CURSES + @%:@error "this Curses library is not enhanced" + "this Curses library is not enhanced" + @%:@endif + ]], [[ + chtype a = A_BOLD; + int b = KEY_LEFT; + chtype c = COLOR_PAIR(1) & A_COLOR; + attr_t d = WA_NORMAL; + cchar_t e; + wint_t f; + initscr(); + init_pair(1, COLOR_WHITE, COLOR_RED); + wattr_set(stdscr, d, 0, NULL); + wget_wch(stdscr, &f); + ]])], + [ax_cv_plaincurses_enhanced=yes], + [ax_cv_plaincurses_enhanced=no]) + ]) + AS_IF([test "x$ax_cv_plaincurses_enhanced" = xyes], [ + ax_cv_curses_enhanced=yes + ax_cv_curses_color=yes + AC_DEFINE([HAVE_CURSES_ENHANCED], [1], [Define to 1 if library supports X/Open Enhanced functions]) + AC_DEFINE([HAVE_CURSES_COLOR], [1], [Define to 1 if library supports color (enhanced functions)]) + ]) + + # Check for color functions + + AC_CACHE_CHECK([for Curses color functions], [ax_cv_plaincurses_color], [ + AC_LINK_IFELSE([AC_LANG_PROGRAM([[ + @%:@define _XOPEN_SOURCE_EXTENDED 1 + @%:@include + ]], [[ + chtype a = A_BOLD; + int b = KEY_LEFT; + chtype c = COLOR_PAIR(1) & A_COLOR; + initscr(); + init_pair(1, COLOR_WHITE, COLOR_RED); + ]])], + [ax_cv_plaincurses_color=yes], + [ax_cv_plaincurses_color=no]) + ]) 
+ AS_IF([test "x$ax_cv_plaincurses_color" = xyes], [ + ax_cv_curses_color=yes + AC_DEFINE([HAVE_CURSES_COLOR], [1], [Define to 1 if library supports color (enhanced functions)]) + ]) + + # Check for obsolete functions + + AC_CACHE_CHECK([for obsolete Curses functions], [ax_cv_plaincurses_obsolete], [ + AC_LINK_IFELSE([AC_LANG_PROGRAM([[ + @%:@include + ]], [[ + chtype a = A_BOLD; + int b = KEY_LEFT; + int g = getattrs(stdscr); + int h = getcurx(stdscr) + getmaxx(stdscr); + initscr(); + ]])], + [ax_cv_plaincurses_obsolete=yes], + [ax_cv_plaincurses_obsolete=no]) + ]) + AS_IF([test "x$ax_cv_plaincurses_obsolete" = xyes], [ + ax_cv_curses_obsolete=yes + AC_DEFINE([HAVE_CURSES_OBSOLETE], [1], [Define to 1 if library supports certain obsolete features]) + ]) + ]) + + AS_IF([test "x$ax_cv_header_curses_h" = xno], [ + AC_MSG_WARN([could not find a working curses.h]) + ]) + ]) + ]) + + AS_IF([test "x$ax_cv_curses" != xyes], [ax_cv_curses=no]) + AS_IF([test "x$ax_cv_curses_enhanced" != xyes], [ax_cv_curses_enhanced=no]) + AS_IF([test "x$ax_cv_curses_color" != xyes], [ax_cv_curses_color=no]) + AS_IF([test "x$ax_cv_curses_obsolete" != xyes], [ax_cv_curses_obsolete=no]) + + LIBS=$ax_saved_LIBS +])dnl diff --git a/build/common.m4 b/build/common.m4 new file mode 100644 index 00000000..53902880 --- /dev/null +++ b/build/common.m4 @@ -0,0 +1,585 @@ +dnl -------------------------------------------------------- -*- autoconf -*- +dnl Licensed to the Apache Software Foundation (ASF) under one or more +dnl contributor license agreements. See the NOTICE file distributed with +dnl this work for additional information regarding copyright ownership. +dnl The ASF licenses this file to You under the Apache License, Version 2.0 +dnl (the "License"); you may not use this file except in compliance with +dnl the License. 
You may obtain a copy of the License at +dnl +dnl http://www.apache.org/licenses/LICENSE-2.0 +dnl +dnl Unless required by applicable law or agreed to in writing, software +dnl distributed under the License is distributed on an "AS IS" BASIS, +dnl WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +dnl See the License for the specific language governing permissions and +dnl limitations under the License. + +dnl +dnl common.m4: Trafficserver's general-purpose autoconf macros +dnl + +dnl +dnl TS_CONFIG_NICE(filename) +dnl +dnl Saves a snapshot of the configure command-line for later reuse +dnl +AC_DEFUN([TS_CONFIG_NICE], [ + rm -f $1 + cat >$1<> $1 + fi + if test -n "$CXX"; then + echo "CXX=\"$CXX\"; export CXX" >> $1 + fi + if test -n "$CFLAGS"; then + echo "CFLAGS=\"$CFLAGS\"; export CFLAGS" >> $1 + fi + if test -n "$CXXFLAGS"; then + echo "CXXFLAGS=\"$CXXFLAGS\"; export CXXFLAGS" >> $1 + fi + if test -n "$CPPFLAGS"; then + echo "CPPFLAGS=\"$CPPFLAGS\"; export CPPFLAGS" >> $1 + fi + if test -n "$LDFLAGS"; then + echo "LDFLAGS=\"$LDFLAGS\"; export LDFLAGS" >> $1 + fi + if test -n "$LTFLAGS"; then + echo "LTFLAGS=\"$LTFLAGS\"; export LTFLAGS" >> $1 + fi + if test -n "$LIBS"; then + echo "LIBS=\"$LIBS\"; export LIBS" >> $1 + fi + if test -n "$INCLUDES"; then + echo "INCLUDES=\"$INCLUDES\"; export INCLUDES" >> $1 + fi + if test -n "$NOTEST_CFLAGS"; then + echo "NOTEST_CFLAGS=\"$NOTEST_CFLAGS\"; export NOTEST_CFLAGS" >> $1 + fi + if test -n "$NOTEST_CXXFLAGS"; then + echo "NOTEST_CXXFLAGS=\"$NOTEST_CXXFLAGS\"; export NOTEST_CXXFLAGS" >> $1 + fi + if test -n "$NOTEST_CPPFLAGS"; then + echo "NOTEST_CPPFLAGS=\"$NOTEST_CPPFLAGS\"; export NOTEST_CPPFLAGS" >> $1 + fi + if test -n "$NOTEST_LDFLAGS"; then + echo "NOTEST_LDFLAGS=\"$NOTEST_LDFLAGS\"; export NOTEST_LDFLAGS" >> $1 + fi + if test -n "$NOTEST_LIBS"; then + echo "NOTEST_LIBS=\"$NOTEST_LIBS\"; export NOTEST_LIBS" >> $1 + fi + + # Retrieve command-line arguments. 
+ eval "set x $[0] $ac_configure_args" + shift + + for arg + do + TS_EXPAND_VAR(arg, $arg) + echo "\"[$]arg\" \\" >> $1 + done + echo '"[$]@"' >> $1 + chmod +x $1 +])dnl + +dnl +dnl TS_SETIFNULL(variable, value) +dnl +dnl Set variable iff it's currently null +dnl +AC_DEFUN([TS_SETIFNULL], [ + if test -z "$$1"; then + test "x$verbose" = "xyes" && echo " setting $1 to \"$2\"" + $1="$2" + fi +])dnl + +dnl +dnl TS_SETVAR(variable, value) +dnl +dnl Set variable no matter what +dnl +AC_DEFUN([TS_SETVAR], [ + test "x$verbose" = "xyes" && echo " forcing $1 to \"$2\"" + $1="$2" +])dnl + +dnl +dnl TS_ADDTO(variable, value) +dnl +dnl Add value to variable +dnl +AC_DEFUN([TS_ADDTO], [ + if test "x$$1" = "x"; then + test "x$verbose" = "xyes" && echo " setting $1 to \"$2\"" + $1="$2" + else + ats_addto_bugger="$2" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in $$1; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to $1" + $1="$$1 $i" + fi + done + fi +])dnl + +dnl +dnl TS_REMOVEFROM(variable, value) +dnl +dnl Remove a value from a variable +dnl +AC_DEFUN([TS_REMOVEFROM], [ + if test "x$$1" = "x$2"; then + test "x$verbose" = "xyes" && echo " nulling $1" + $1="" + else + ats_new_bugger="" + ats_removed=0 + for i in $$1; do + if test "x$i" != "x$2"; then + ats_new_bugger="$ats_new_bugger $i" + else + ats_removed=1 + fi + done + if test $ats_removed = "1"; then + test "x$verbose" = "xyes" && echo " removed \"$2\" from $1" + $1=$ats_new_bugger + fi + fi +]) dnl + +dnl +dnl TS_TRY_COMPILE_NO_WARNING(INCLUDES, FUNCTION-BODY, +dnl [ACTIONS-IF-NO-WARNINGS], [ACTIONS-IF-WARNINGS]) +dnl +dnl Tries a compile test with warnings activated so that the result +dnl is false if the code doesn't compile cleanly. 
For compilers +dnl where it is not known how to activate a "fail-on-error" mode, +dnl it is undefined which of the sets of actions will be run. +dnl +dnl We actually always try to link the resulting program, since gcc has +dnl a nasty habit of compiling code that cannot subsequently be linked. +dnl +AC_DEFUN([TS_TRY_COMPILE_NO_WARNING], +[ats_save_CFLAGS=$CFLAGS + CFLAGS="$CFLAGS $CFLAGS_WARN" + if test "$ac_cv_prog_gcc" = "yes"; then + CFLAGS="$CFLAGS -Werror" + fi + CFLAGS=$(echo $CFLAGS | sed -e 's/^-w$//' -e 's/^-w //' -e 's/ -w$//' -e 's/ -w / /') + AC_LINK_IFELSE([AC_LANG_PROGRAM([$1], [$2])], [$3], [$4]) + CFLAGS=$ats_save_CFLAGS +]) + + +dnl Iteratively interpolate the contents of the second argument +dnl until interpolation offers no new result. Then assign the +dnl final result to $1. +dnl +dnl Example: +dnl +dnl foo=1 +dnl bar='${foo}/2' +dnl baz='${bar}/3' +dnl TS_EXPAND_VAR(fraz, $baz) +dnl $fraz is now "1/2/3" +dnl +AC_DEFUN([TS_EXPAND_VAR], [ +ats_last= +ats_cur="$2" +while test "x${ats_cur}" != "x${ats_last}"; +do + ats_last="${ats_cur}" + ats_cur=`eval "echo ${ats_cur}"` +done +$1="${ats_cur}" +]) + + +dnl +dnl Removes the value of $3 from the string in $2, strips of any leading +dnl slashes, and returns the value in $1. 
+dnl +dnl Example: +dnl orig_path="${prefix}/bar" +dnl TS_PATH_RELATIVE(final_path, $orig_path, $prefix) +dnl $final_path now contains "bar" +AC_DEFUN([TS_PATH_RELATIVE], [ +ats_stripped=`echo $2 | sed -e "s#^$3##"` +# check if the stripping was successful +if test "x$2" != "x${ats_stripped}"; then +# it was, so strip of any leading slashes + $1="`echo ${ats_stripped} | sed -e 's#^/*##'`" +else +# it wasn't so return the original + $1="$2" +fi +]) + + +dnl TS_SUBST(VARIABLE) +dnl Makes VARIABLE available in generated files +dnl (do not use @variable@ in Makefiles, but $(variable)) +AC_DEFUN([TS_SUBST], [ + TS_VAR_SUBST="$TS_VAR_SUBST $1" + AC_SUBST($1) +]) + +dnl +dnl TS_SUBST_LAYOUT_PATH +dnl Export (via TS_SUBST) the various path-related variables that +dnl trafficserver will use while generating scripts and +dnl the default config file. +AC_DEFUN([TS_SUBST_LAYOUT_PATH], [ + TS_EXPAND_VAR(exp_$1, [$]$1) + TS_PATH_RELATIVE(rel_$1, [$]exp_$1, ${prefix}) + TS_SUBST(exp_$1) + TS_SUBST(rel_$1) + TS_SUBST($1) +]) + +dnl TS_HELP_STRING(LHS, RHS) +dnl Autoconf 2.50 can not handle substr correctly. It does have +dnl AC_HELP_STRING, so let's try to call it if we can. +dnl Note: this define must be on one line so that it can be properly returned +dnl as the help string. When using this macro with a multi-line RHS, ensure +dnl that you surround the macro invocation with []s +AC_DEFUN([TS_HELP_STRING], [ifelse(regexp(AC_ACVERSION, 2\.1), -1, AC_HELP_STRING([$1],[$2]),[ ][$1] substr([ ],len($1))[$2])]) + +dnl +dnl TS_LAYOUT(configlayout, layoutname [, extravars]) +dnl +AC_DEFUN([TS_LAYOUT], [ + if test ! -f $srcdir/config.layout; then + echo "** Error: Layout file $srcdir/config.layout not found" + echo "** Error: Cannot use undefined layout '$LAYOUT'" + exit 1 + fi + # Catch layout names including a slash which will otherwise + # confuse the heck out of the sed script. 
+ case $2 in + */*) + echo "** Error: $2 is not a valid layout name" + exit 1 ;; + esac + pldconf=./config.pld + changequote({,}) + sed -e "1s/[ ]*<[lL]ayout[ ]*$2[ ]*>[ ]*//;1t" \ + -e "1,/[ ]*<[lL]ayout[ ]*$2[ ]*>[ ]*/d" \ + -e '/[ ]*<\/Layout>[ ]*/,$d' \ + -e "s/^[ ]*//g" \ + -e "s/:[ ]*/=\'/g" \ + -e "s/[ ]*$/'/g" \ + $1 > $pldconf + layout_name=$2 + if test ! -s $pldconf; then + echo "** Error: unable to find layout $layout_name" + exit 1 + fi + . $pldconf + rm $pldconf + for var in prefix exec_prefix bindir sbindir libexecdir mandir infodir \ + sysconfdir datadir includedir localstatedir runtimedir \ + logdir libdir installbuilddir libsuffix $3; do + eval "val=\"\$$var\"" + case $val in + *+) + val=`echo $val | sed -e 's;\+$;;'` + eval "$var=\"\$val\"" + autosuffix=yes + ;; + *) + autosuffix=no + ;; + esac + val=`echo $val | sed -e 's:\(.\)/*$:\1:'` + val=`echo $val | sed -e 's:[\$]\([a-z_]*\):${\1}:g'` + if test "$autosuffix" = "yes"; then + if echo $val | grep -i '/trafficserver$' >/dev/null; then + addtarget=no + else + addtarget=yes + fi + if test "$addtarget" = "yes"; then + val="$val/trafficserver" + fi + fi + eval "$var='$val'" + done + for var in bindir sbindir libexecdir mandir infodir sysconfdir \ + datadir localstatedir runtimedir logdir libdir $3; do + eval "val=\"\$$var\"" + case $val in + *+) + val=`echo $val | sed -e 's;\+$;;'` + eval "$var=\"\$val\"" + autosuffix=yes + ;; + *) + autosuffix=no + ;; + esac + org_val= + exp_val="$val" + while test "x${exp_val}" != "x${org_val}"; + do + org_val="${exp_val}" + exp_val="`eval \"echo ${exp_val}\"`" + done + if echo $exp_val | grep -i '/trafficserver$' >/dev/null; then + addtarget=no + else + addtarget=yes + fi + if test "$addsuffix" = "yes" -a "$addtarget" = "yes"; then + val="$val/trafficserver" + fi + var="pkg${var}" + eval "$var='$val'" + done + changequote([,]) +])dnl + +dnl +dnl TS_ENABLE_LAYOUT(default layout name [, extra vars]) +dnl +AC_DEFUN([TS_ENABLE_LAYOUT], [ +AC_ARG_ENABLE(layout, + 
[TS_HELP_STRING([--enable-layout=LAYOUT],[Enable LAYOUT specified inside config.layout file (defaults to TrafficServer)])],[ + LAYOUT=$enableval +]) + +if test -z "$LAYOUT"; then + LAYOUT="$1" +fi +TS_LAYOUT($srcdir/config.layout, $LAYOUT, $2) + +AC_MSG_CHECKING(for chosen layout) +AC_MSG_RESULT($layout_name) +]) + + +dnl +dnl TS_PARSE_ARGUMENTS +dnl a reimplementation of autoconf's argument parser, +dnl used here to allow us to co-exist layouts and argument based +dnl set ups. +AC_DEFUN([TS_PARSE_ARGUMENTS], [ +ac_prev= +# Retrieve the command-line arguments. The eval is needed because +# the arguments are quoted to preserve accuracy. +eval "set x $ac_configure_args" +shift +for ac_option +do +# If the previous option needs an argument, assign it. + if test -n "$ac_prev"; then + eval "$ac_prev=\$ac_option" + ac_prev= + continue + fi + + ac_optarg=`expr "x$ac_option" : 'x[[^=]]*=\(.*\)'` + + case $ac_option in + + -bindir | --bindir | --bindi | --bind | --bin | --bi) + ac_prev=bindir ;; + -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*) + bindir="$ac_optarg" + pkgbindir="$ac_optarg" ;; + + -datadir | --datadir | --datadi | --datad | --data | --dat | --da) + ac_prev=datadir ;; + -datadir=* | --datadir=* | --datadi=* | --datad=* | --data=* | --dat=* \ + | --da=*) + datadir="$ac_optarg" + pkgdatadir="$ac_optarg" ;; + + -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \ + | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \ + | --exec | --exe | --ex) + ac_prev=exec_prefix ;; + -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \ + | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \ + | --exec=* | --exe=* | --ex=*) + exec_prefix="$ac_optarg" ;; + + -includedir | --includedir | --includedi | --included | --include \ + | --includ | --inclu | --incl | --inc) + ac_prev=includedir ;; + -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \ + | --includ=* | --inclu=* | --incl=* | 
--inc=*) + includedir="$ac_optarg" ;; + + -infodir | --infodir | --infodi | --infod | --info | --inf) + ac_prev=infodir ;; + -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*) + infodir="$ac_optarg" ;; + + -libdir | --libdir | --libdi | --libd) + ac_prev=libdir ;; + -libdir=* | --libdir=* | --libdi=* | --libd=*) + libdir="$ac_optarg" + pkglibdir="$ac_optarg" ;; + + -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \ + | --libexe | --libex | --libe) + ac_prev=libexecdir ;; + -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \ + | --libexe=* | --libex=* | --libe=*) + libexecdir="$ac_optarg" + pkglibexecdir="$ac_optarg" ;; + + -localstatedir | --localstatedir | --localstatedi | --localstated \ + | --localstate | --localstat | --localsta | --localst \ + | --locals | --local | --loca | --loc | --lo) + ac_prev=localstatedir ;; + -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \ + | --localstate=* | --localstat=* | --localsta=* | --localst=* \ + | --locals=* | --local=* | --loca=* | --loc=* | --lo=*) + localstatedir="$ac_optarg" + pkglocalstatedir="$ac_optarg" ;; + + -mandir | --mandir | --mandi | --mand | --man | --ma | --m) + ac_prev=mandir ;; + -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*) + mandir="$ac_optarg" ;; + + -prefix | --prefix | --prefi | --pref | --pre | --pr | --p) + ac_prev=prefix ;; + -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*) + prefix="$ac_optarg" ;; + + -runtimedir | --runtimedir | --runtimedi | --runtimed | --runtime | --runtim \ + | --runti | --runt | --run | --ru | --r) + ac_prev=runtimedir ;; + -runtimedir=* | --runtimedir=* | --runtimedi=* | --runtimed=* | --runtime=* \ + | --runtim=* | --runti=* | --runt=* | --run=* | --ru=* | --r=*) + ac_prev=runtimedir ;; + + -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb) + ac_prev=sbindir ;; + -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | 
--sbin=* \ + | --sbi=* | --sb=*) + sbindir="$ac_optarg" + pkgsbindir="$ac_optarg" ;; + + -sharedstatedir | --sharedstatedir | --sharedstatedi \ + | --sharedstated | --sharedstate | --sharedstat | --sharedsta \ + | --sharedst | --shareds | --shared | --share | --shar \ + | --sha | --sh) + ac_prev=sharedstatedir ;; + -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \ + | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \ + | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \ + | --sha=* | --sh=*) + sharedstatedir="$ac_optarg" ;; + + -sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \ + | --syscon | --sysco | --sysc | --sys | --sy) + ac_prev=sysconfdir ;; + -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \ + | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*) + sysconfdir="$ac_optarg" + pkgsysconfdir="$ac_optarg" ;; + + esac +done + +# Be sure to have absolute paths. +for ac_var in exec_prefix prefix +do + eval ac_val=$`echo $ac_var` + case $ac_val in + [[\\/$]]* | ?:[[\\/]]* | NONE | '' ) ;; + *) AC_MSG_ERROR([expected an absolute path for --$ac_var: $ac_val]);; + esac +done + +])dnl + +dnl TS_FLAG_HEADERS(header-file, [action-if-found], [action-if-not-found], [includes]) +dnl +AC_DEFUN([TS_FLAG_HEADERS], [ +AC_CHECK_HEADERS([$1], [$2], [$3], [$4]) +for tsc_i in $1 +do + ac_safe=`echo "$tsc_i" | sed 'y%./+-%__p_%'` + tsc_2=`echo "$tsc_i" | sed -e 's%/%_%g' -e 's/\.//g' -e 's/-//g'` + if eval "test \"`echo '$ac_cv_header_'$ac_safe`\" = yes"; then + eval "$tsc_2=1" + else + eval "$tsc_2=0" + fi +done +]) + +dnl +dnl Support macro for AC_ARG_ENABLE +dnl Arguments: +dnl 1: Variable prefix +dnl 2: Variable stem +dnl The prefix is prepended with separating underscore to the stem +dnl to create the boolean variable to be set. The stem is also used +dnl to create the name of the AC_ARG_ENABLE variable and therefore +dnl must be the same as passed to AC_ARG_ENABLE. 
The prefix should +dnl be one of "use", "has", or "is", as is appropriate for the +dnl argument type. The target variable will be set to '1' if the +dnl enable argument is 'yes', and '0' otherwise. +dnl +dnl For instance, if the prefix is "has" and stem is "bob", +dnl then AC_ARG_ENABLE will set $enable_bob and this macro will set +dnl $has_bob based on the value in $enable_bob. See the examples +dnl in configure.ac. +dnl +dnl Note: As with AC_ARG_ENABLE, non-alphanumeric characters are +dnl transformed to underscores. +dnl +AC_DEFUN([TS_ARG_ENABLE_VAR],[ + tsl_prefix="AS_TR_SH($1)" + tsl_stem="AS_TR_SH($2)" + eval "tsl_enable=\$enable_${tsl_stem}" + AS_IF([test "x$tsl_enable" = "xyes"], + [eval "${tsl_prefix}_${tsl_stem}=1"], + [eval "${tsl_prefix}_${tsl_stem}=0"] + ) +]) + +dnl +dnl TS_SEARCH_LIBRARY(function, search-libs, [action-if-found], [action-if-not-found]) +dnl This macro works like AC_SEARCH_LIBS, except that $LIBS is not modified. If the library +dnl is found, it is cached in the ts_cv_lib_${function} variable. +dnl +AC_DEFUN([TS_SEARCH_LIBRARY], [ + __saved_LIBS="$LIBS" + + AC_SEARCH_LIBS($1, $2, [ + dnl action-if-found + case $ac_cv_search_$1 in + "none required"|"no") ts_cv_search_$1="" ;; + *) ts_cv_search_$1=$ac_cv_search_$1 ;; + esac + m4_default([$3], [true]) + ], [ + dnl action-if-not-found + m4_default([$4], [true]) + ]) + + LIBS="$__saved_LIBS" + unset __saved_LIBS +]) diff --git a/build/crypto.m4 b/build/crypto.m4 new file mode 100644 index 00000000..f8ad19fb --- /dev/null +++ b/build/crypto.m4 @@ -0,0 +1,123 @@ +dnl -------------------------------------------------------- -*- autoconf -*- +dnl Licensed to the Apache Software Foundation (ASF) under one or more +dnl contributor license agreements. See the NOTICE file distributed with +dnl this work for additional information regarding copyright ownership. 
+dnl The ASF licenses this file to You under the Apache License, Version 2.0 +dnl (the "License"); you may not use this file except in compliance with +dnl the License. You may obtain a copy of the License at +dnl +dnl http://www.apache.org/licenses/LICENSE-2.0 +dnl +dnl Unless required by applicable law or agreed to in writing, software +dnl distributed under the License is distributed on an "AS IS" BASIS, +dnl WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +dnl See the License for the specific language governing permissions and +dnl limitations under the License. + +dnl +dnl crypto.m4 Trafficserver's Crypto autoconf macros +dnl + +dnl +dnl TS_CHECK_CRYPTO: look for crypto libraries and headers +dnl +AC_DEFUN([TS_CHECK_CRYPTO], [ + AC_SEARCH_LIBS([crypt], [crypt], [AC_SUBST([LIBCRYPT],["-lcrypt"])]) + + AX_CHECK_OPENSSL([ + enable_crypto=yes + ], [ + AC_ERROR(failed to find OpenSSL) + enable_crypto=no + ]) + + if test "x${enable_crypto}" = "xyes"; then + TS_ADDTO(LDFLAGS, [$OPENSSL_LDFLAGS]) + TS_ADDTO(CPPFLAGS, [$OPENSSL_INCLUDES]) + fi + + dnl add checks for other varieties of ssl here +]) +dnl + +AC_DEFUN([TS_CHECK_CRYPTO_EC_KEYS], [ + _eckeys_saved_LIBS=$LIBS + TS_ADDTO(LIBS, [$OPENSSL_LIBS]) + AC_CHECK_HEADERS(openssl/ec.h) + AC_CHECK_FUNCS(EC_KEY_new_by_curve_name, [enable_tls_eckey=yes], [enable_tls_eckey=no]) + LIBS=$_eckeys_saved_LIBS + + AC_MSG_CHECKING(whether EC keys are supported) + AC_MSG_RESULT([$enable_tls_eckey]) + TS_ARG_ENABLE_VAR([use], [tls-eckey]) + AC_SUBST(use_tls_eckey) +]) + +AC_DEFUN([TS_CHECK_CRYPTO_NEXTPROTONEG], [ + enable_tls_npn=yes + _npn_saved_LIBS=$LIBS + TS_ADDTO(LIBS, [$OPENSSL_LIBS]) + AC_CHECK_FUNCS(SSL_CTX_set_next_protos_advertised_cb SSL_CTX_set_next_proto_select_cb SSL_select_next_proto SSL_get0_next_proto_negotiated, + [], [enable_tls_npn=no] + ) + LIBS=$_npn_saved_LIBS + + AC_MSG_CHECKING(whether to enable Next Protocol Negotiation TLS extension support) + AC_MSG_RESULT([$enable_tls_npn]) + 
TS_ARG_ENABLE_VAR([use], [tls-npn]) + AC_SUBST(use_tls_npn) +]) + +AC_DEFUN([TS_CHECK_CRYPTO_ALPN], [ + enable_tls_alpn=yes + _alpn_saved_LIBS=$LIBS + TS_ADDTO(LIBS, [$OPENSSL_LIBS]) + AC_CHECK_FUNCS(SSL_CTX_set_alpn_protos SSL_CTX_set_alpn_select_cb SSL_get0_alpn_selected SSL_select_next_proto, + [], [enable_tls_alpn=no] + ) + LIBS=$_alpn_saved_LIBS + + AC_MSG_CHECKING(whether to enable Application Layer Protocol Negotiation TLS extension support) + AC_MSG_RESULT([$enable_tls_alpn]) + TS_ARG_ENABLE_VAR([use], [tls-alpn]) + AC_SUBST(use_tls_alpn) +]) + +AC_DEFUN([TS_CHECK_CRYPTO_SNI], [ + _sni_saved_LIBS=$LIBS + enable_tls_sni=yes + + TS_ADDTO(LIBS, [$OPENSSL_LIBS]) + AC_CHECK_HEADERS(openssl/tls1.h openssl/ssl.h openssl/ts.h) + # We are looking for SSL_CTX_set_tlsext_servername_callback, but it's a + # macro, so AC_CHECK_FUNCS is not going to do the business. + AC_MSG_CHECKING([for SSL_CTX_set_tlsext_servername_callback]) + AC_COMPILE_IFELSE( + [ + AC_LANG_PROGRAM([[ +#if HAVE_OPENSSL_SSL_H +#include +#endif +#if HAVE_OPENSSL_TLS1_H +#include +#endif + ]], + [[SSL_CTX_set_tlsext_servername_callback(NULL, NULL);]]) + ], + [ + AC_MSG_RESULT([yes]) + ], + [ + AC_MSG_RESULT([no]) + enable_tls_sni=no + ]) + + AC_CHECK_FUNCS(SSL_get_servername, [], [enable_tls_sni=no]) + + LIBS=$_sni_saved_LIBS + + AC_MSG_CHECKING(whether to enable ServerNameIndication TLS extension support) + AC_MSG_RESULT([$enable_tls_sni]) + TS_ARG_ENABLE_VAR([use], [tls-sni]) + AC_SUBST(use_tls_sni) +]) diff --git a/build/jemalloc.m4 b/build/jemalloc.m4 new file mode 100644 index 00000000..55479b5c --- /dev/null +++ b/build/jemalloc.m4 @@ -0,0 +1,78 @@ +dnl -------------------------------------------------------- -*- autoconf -*- +dnl Licensed to the Apache Software Foundation (ASF) under one or more +dnl contributor license agreements. See the NOTICE file distributed with +dnl this work for additional information regarding copyright ownership. 
+dnl The ASF licenses this file to You under the Apache License, Version 2.0 +dnl (the "License"); you may not use this file except in compliance with +dnl the License. You may obtain a copy of the License at +dnl +dnl http://www.apache.org/licenses/LICENSE-2.0 +dnl +dnl Unless required by applicable law or agreed to in writing, software +dnl distributed under the License is distributed on an "AS IS" BASIS, +dnl WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +dnl See the License for the specific language governing permissions and +dnl limitations under the License. + +dnl +dnl jemalloc.m4: Trafficserver's jemalloc autoconf macros +dnl + +AC_DEFUN([TS_CHECK_JEMALLOC], [ +enable_jemalloc=no +AC_ARG_WITH([jemalloc], [AC_HELP_STRING([--with-jemalloc=DIR], [use a specific jemalloc library])], +[ + if test "$withval" != "no"; then + if test "x${enable_tcmalloc}" = "xyes"; then + AC_MSG_ERROR([Cannot compile with both jemalloc and tcmalloc]) + fi + enable_jemalloc=yes + jemalloc_base_dir="$withval" + case "$withval" in + yes) + jemalloc_base_dir="/usr" + AC_MSG_CHECKING(checking for jemalloc includes standard directories) + ;; + *":"*) + jemalloc_include="`echo $withval |sed -e 's/:.*$//'`" + jemalloc_ldflags="`echo $withval |sed -e 's/^.*://'`" + AC_MSG_CHECKING(checking for jemalloc includes in $jemalloc_include libs in $jemalloc_ldflags) + ;; + *) + jemalloc_include="$withval/include" + jemalloc_ldflags="$withval/lib" + AC_MSG_CHECKING(checking for jemalloc includes in $withval) + ;; + esac + fi +]) + +jemalloch=0 +if test "$enable_jemalloc" != "no"; then + saved_ldflags=$LDFLAGS + saved_cppflags=$CPPFLAGS + jemalloc_have_headers=0 + jemalloc_have_libs=0 + if test "$jemalloc_base_dir" != "/usr"; then + TS_ADDTO(CPPFLAGS, [-I${jemalloc_include}]) + TS_ADDTO(LDFLAGS, [-L${jemalloc_ldflags}]) + TS_ADDTO(LIBTOOL_LINK_FLAGS, [-R${jemalloc_ldflags}]) + fi + # On Darwin, jemalloc symbols are prefixed with je_. 
Search for that first, then fall back + # to unadorned symbols. + AC_SEARCH_LIBS([je_malloc_stats_print], [jemalloc], [jemalloc_have_libs=1], + [AC_SEARCH_LIBS([malloc_stats_print], [jemalloc], [jemalloc_have_libs=1])] + ) + if test "$jemalloc_have_libs" != "0"; then + TS_FLAG_HEADERS(jemalloc/jemalloc.h, [jemalloc_have_headers=1]) + fi + if test "$jemalloc_have_headers" != "0"; then + jemalloch=1 + TS_ADDTO(LIBS, [-ljemalloc]) + else + CPPFLAGS=$saved_cppflags + LDFLAGS=$saved_ldflags + fi +fi +AC_SUBST(jemalloch) +]) diff --git a/build/libtool.m4 b/build/libtool.m4 new file mode 100644 index 00000000..f12cfdf0 --- /dev/null +++ b/build/libtool.m4 @@ -0,0 +1,7992 @@ +# libtool.m4 - Configure libtool for the host system. -*-Autoconf-*- +# +# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, +# 2006, 2007, 2008, 2009, 2010, 2011 Free Software +# Foundation, Inc. +# Written by Gordon Matzigkeit, 1996 +# +# This file is free software; the Free Software Foundation gives +# unlimited permission to copy and/or distribute it, with or without +# modifications, as long as this notice is preserved. + +m4_define([_LT_COPYING], [dnl +# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, +# 2006, 2007, 2008, 2009, 2010, 2011 Free Software +# Foundation, Inc. +# Written by Gordon Matzigkeit, 1996 +# +# This file is part of GNU Libtool. +# +# GNU Libtool is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License as +# published by the Free Software Foundation; either version 2 of +# the License, or (at your option) any later version. +# +# As a special exception to the GNU General Public License, +# if you distribute this file as part of a program or library that +# is built using GNU Libtool, you may include this file under the +# same distribution terms that you use for the rest of that program. 
+# +# GNU Libtool is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with GNU Libtool; see the file COPYING. If not, a copy +# can be downloaded from http://www.gnu.org/licenses/gpl.html, or +# obtained by writing to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +]) + +# serial 57 LT_INIT + + +# LT_PREREQ(VERSION) +# ------------------ +# Complain and exit if this libtool version is less that VERSION. +m4_defun([LT_PREREQ], +[m4_if(m4_version_compare(m4_defn([LT_PACKAGE_VERSION]), [$1]), -1, + [m4_default([$3], + [m4_fatal([Libtool version $1 or higher is required], + 63)])], + [$2])]) + + +# _LT_CHECK_BUILDDIR +# ------------------ +# Complain if the absolute build directory name contains unusual characters +m4_defun([_LT_CHECK_BUILDDIR], +[case `pwd` in + *\ * | *\ *) + AC_MSG_WARN([Libtool does not cope well with whitespace in `pwd`]) ;; +esac +]) + + +# LT_INIT([OPTIONS]) +# ------------------ +AC_DEFUN([LT_INIT], +[AC_PREREQ([2.58])dnl We use AC_INCLUDES_DEFAULT +AC_REQUIRE([AC_CONFIG_AUX_DIR_DEFAULT])dnl +AC_BEFORE([$0], [LT_LANG])dnl +AC_BEFORE([$0], [LT_OUTPUT])dnl +AC_BEFORE([$0], [LTDL_INIT])dnl +m4_require([_LT_CHECK_BUILDDIR])dnl + +dnl Autoconf doesn't catch unexpanded LT_ macros by default: +m4_pattern_forbid([^_?LT_[A-Z_]+$])dnl +m4_pattern_allow([^(_LT_EOF|LT_DLGLOBAL|LT_DLLAZY_OR_NOW|LT_MULTI_MODULE)$])dnl +dnl aclocal doesn't pull ltoptions.m4, ltsugar.m4, or ltversion.m4 +dnl unless we require an AC_DEFUNed macro: +AC_REQUIRE([LTOPTIONS_VERSION])dnl +AC_REQUIRE([LTSUGAR_VERSION])dnl +AC_REQUIRE([LTVERSION_VERSION])dnl +AC_REQUIRE([LTOBSOLETE_VERSION])dnl +m4_require([_LT_PROG_LTMAIN])dnl + +_LT_SHELL_INIT([SHELL=${CONFIG_SHELL-/bin/sh}]) + 
+dnl Parse OPTIONS +_LT_SET_OPTIONS([$0], [$1]) + +# This can be used to rebuild libtool when needed +LIBTOOL_DEPS="$ltmain" + +# Always use our own libtool. +LIBTOOL='$(SHELL) $(top_builddir)/libtool' +AC_SUBST(LIBTOOL)dnl + +_LT_SETUP + +# Only expand once: +m4_define([LT_INIT]) +])# LT_INIT + +# Old names: +AU_ALIAS([AC_PROG_LIBTOOL], [LT_INIT]) +AU_ALIAS([AM_PROG_LIBTOOL], [LT_INIT]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_PROG_LIBTOOL], []) +dnl AC_DEFUN([AM_PROG_LIBTOOL], []) + + +# _LT_CC_BASENAME(CC) +# ------------------- +# Calculate cc_basename. Skip known compiler wrappers and cross-prefix. +m4_defun([_LT_CC_BASENAME], +[for cc_temp in $1""; do + case $cc_temp in + compile | *[[\\/]]compile | ccache | *[[\\/]]ccache ) ;; + distcc | *[[\\/]]distcc | purify | *[[\\/]]purify ) ;; + \-*) ;; + *) break;; + esac +done +cc_basename=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"` +]) + + +# _LT_FILEUTILS_DEFAULTS +# ---------------------- +# It is okay to use these file commands and assume they have been set +# sensibly after `m4_require([_LT_FILEUTILS_DEFAULTS])'. 
+m4_defun([_LT_FILEUTILS_DEFAULTS], +[: ${CP="cp -f"} +: ${MV="mv -f"} +: ${RM="rm -f"} +])# _LT_FILEUTILS_DEFAULTS + + +# _LT_SETUP +# --------- +m4_defun([_LT_SETUP], +[AC_REQUIRE([AC_CANONICAL_HOST])dnl +AC_REQUIRE([AC_CANONICAL_BUILD])dnl +AC_REQUIRE([_LT_PREPARE_SED_QUOTE_VARS])dnl +AC_REQUIRE([_LT_PROG_ECHO_BACKSLASH])dnl + +_LT_DECL([], [PATH_SEPARATOR], [1], [The PATH separator for the build system])dnl +dnl +_LT_DECL([], [host_alias], [0], [The host system])dnl +_LT_DECL([], [host], [0])dnl +_LT_DECL([], [host_os], [0])dnl +dnl +_LT_DECL([], [build_alias], [0], [The build system])dnl +_LT_DECL([], [build], [0])dnl +_LT_DECL([], [build_os], [0])dnl +dnl +AC_REQUIRE([AC_PROG_CC])dnl +AC_REQUIRE([LT_PATH_LD])dnl +AC_REQUIRE([LT_PATH_NM])dnl +dnl +AC_REQUIRE([AC_PROG_LN_S])dnl +test -z "$LN_S" && LN_S="ln -s" +_LT_DECL([], [LN_S], [1], [Whether we need soft or hard links])dnl +dnl +AC_REQUIRE([LT_CMD_MAX_LEN])dnl +_LT_DECL([objext], [ac_objext], [0], [Object file suffix (normally "o")])dnl +_LT_DECL([], [exeext], [0], [Executable file suffix (normally "")])dnl +dnl +m4_require([_LT_FILEUTILS_DEFAULTS])dnl +m4_require([_LT_CHECK_SHELL_FEATURES])dnl +m4_require([_LT_PATH_CONVERSION_FUNCTIONS])dnl +m4_require([_LT_CMD_RELOAD])dnl +m4_require([_LT_CHECK_MAGIC_METHOD])dnl +m4_require([_LT_CHECK_SHAREDLIB_FROM_LINKLIB])dnl +m4_require([_LT_CMD_OLD_ARCHIVE])dnl +m4_require([_LT_CMD_GLOBAL_SYMBOLS])dnl +m4_require([_LT_WITH_SYSROOT])dnl + +_LT_CONFIG_LIBTOOL_INIT([ +# See if we are running on zsh, and set the options which allow our +# commands through without removal of \ escapes INIT. +if test -n "\${ZSH_VERSION+set}" ; then + setopt NO_GLOB_SUBST +fi +]) +if test -n "${ZSH_VERSION+set}" ; then + setopt NO_GLOB_SUBST +fi + +_LT_CHECK_OBJDIR + +m4_require([_LT_TAG_COMPILER])dnl + +case $host_os in +aix3*) + # AIX sometimes has problems with the GCC collect2 program. 
For some + # reason, if we set the COLLECT_NAMES environment variable, the problems + # vanish in a puff of smoke. + if test "X${COLLECT_NAMES+set}" != Xset; then + COLLECT_NAMES= + export COLLECT_NAMES + fi + ;; +esac + +# Global variables: +ofile=libtool +can_build_shared=yes + +# All known linkers require a `.a' archive for static linking (except MSVC, +# which needs '.lib'). +libext=a + +with_gnu_ld="$lt_cv_prog_gnu_ld" + +old_CC="$CC" +old_CFLAGS="$CFLAGS" + +# Set sane defaults for various variables +test -z "$CC" && CC=cc +test -z "$LTCC" && LTCC=$CC +test -z "$LTCFLAGS" && LTCFLAGS=$CFLAGS +test -z "$LD" && LD=ld +test -z "$ac_objext" && ac_objext=o + +_LT_CC_BASENAME([$compiler]) + +# Only perform the check for file, if the check method requires it +test -z "$MAGIC_CMD" && MAGIC_CMD=file +case $deplibs_check_method in +file_magic*) + if test "$file_magic_cmd" = '$MAGIC_CMD'; then + _LT_PATH_MAGIC + fi + ;; +esac + +# Use C for the default configuration in the libtool script +LT_SUPPORTED_TAG([CC]) +_LT_LANG_C_CONFIG +_LT_LANG_DEFAULT_CONFIG +_LT_CONFIG_COMMANDS +])# _LT_SETUP + + +# _LT_PREPARE_SED_QUOTE_VARS +# -------------------------- +# Define a few sed substitution that help us do robust quoting. +m4_defun([_LT_PREPARE_SED_QUOTE_VARS], +[# Backslashify metacharacters that are still active within +# double-quoted strings. +sed_quote_subst='s/\([["`$\\]]\)/\\\1/g' + +# Same as above, but do not quote variable references. +double_quote_subst='s/\([["`\\]]\)/\\\1/g' + +# Sed substitution to delay expansion of an escaped shell variable in a +# double_quote_subst'ed string. +delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g' + +# Sed substitution to delay expansion of an escaped single quote. 
+delay_single_quote_subst='s/'\''/'\'\\\\\\\'\''/g' + +# Sed substitution to avoid accidental globbing in evaled expressions +no_glob_subst='s/\*/\\\*/g' +]) + +# _LT_PROG_LTMAIN +# --------------- +# Note that this code is called both from `configure', and `config.status' +# now that we use AC_CONFIG_COMMANDS to generate libtool. Notably, +# `config.status' has no value for ac_aux_dir unless we are using Automake, +# so we pass a copy along to make sure it has a sensible value anyway. +m4_defun([_LT_PROG_LTMAIN], +[m4_ifdef([AC_REQUIRE_AUX_FILE], [AC_REQUIRE_AUX_FILE([ltmain.sh])])dnl +_LT_CONFIG_LIBTOOL_INIT([ac_aux_dir='$ac_aux_dir']) +ltmain="$ac_aux_dir/ltmain.sh" +])# _LT_PROG_LTMAIN + + +## ------------------------------------- ## +## Accumulate code for creating libtool. ## +## ------------------------------------- ## + +# So that we can recreate a full libtool script including additional +# tags, we accumulate the chunks of code to send to AC_CONFIG_COMMANDS +# in macros and then make a single call at the end using the `libtool' +# label. + + +# _LT_CONFIG_LIBTOOL_INIT([INIT-COMMANDS]) +# ---------------------------------------- +# Register INIT-COMMANDS to be passed to AC_CONFIG_COMMANDS later. +m4_define([_LT_CONFIG_LIBTOOL_INIT], +[m4_ifval([$1], + [m4_append([_LT_OUTPUT_LIBTOOL_INIT], + [$1 +])])]) + +# Initialize. +m4_define([_LT_OUTPUT_LIBTOOL_INIT]) + + +# _LT_CONFIG_LIBTOOL([COMMANDS]) +# ------------------------------ +# Register COMMANDS to be passed to AC_CONFIG_COMMANDS later. +m4_define([_LT_CONFIG_LIBTOOL], +[m4_ifval([$1], + [m4_append([_LT_OUTPUT_LIBTOOL_COMMANDS], + [$1 +])])]) + +# Initialize. 
+m4_define([_LT_OUTPUT_LIBTOOL_COMMANDS]) + + +# _LT_CONFIG_SAVE_COMMANDS([COMMANDS], [INIT_COMMANDS]) +# ----------------------------------------------------- +m4_defun([_LT_CONFIG_SAVE_COMMANDS], +[_LT_CONFIG_LIBTOOL([$1]) +_LT_CONFIG_LIBTOOL_INIT([$2]) +]) + + +# _LT_FORMAT_COMMENT([COMMENT]) +# ----------------------------- +# Add leading comment marks to the start of each line, and a trailing +# full-stop to the whole comment if one is not present already. +m4_define([_LT_FORMAT_COMMENT], +[m4_ifval([$1], [ +m4_bpatsubst([m4_bpatsubst([$1], [^ *], [# ])], + [['`$\]], [\\\&])]m4_bmatch([$1], [[!?.]$], [], [.]) +)]) + + + +## ------------------------ ## +## FIXME: Eliminate VARNAME ## +## ------------------------ ## + + +# _LT_DECL([CONFIGNAME], VARNAME, VALUE, [DESCRIPTION], [IS-TAGGED?]) +# ------------------------------------------------------------------- +# CONFIGNAME is the name given to the value in the libtool script. +# VARNAME is the (base) name used in the configure script. +# VALUE may be 0, 1 or 2 for a computed quote escaped value based on +# VARNAME. Any other value will be used directly. 
+m4_define([_LT_DECL], +[lt_if_append_uniq([lt_decl_varnames], [$2], [, ], + [lt_dict_add_subkey([lt_decl_dict], [$2], [libtool_name], + [m4_ifval([$1], [$1], [$2])]) + lt_dict_add_subkey([lt_decl_dict], [$2], [value], [$3]) + m4_ifval([$4], + [lt_dict_add_subkey([lt_decl_dict], [$2], [description], [$4])]) + lt_dict_add_subkey([lt_decl_dict], [$2], + [tagged?], [m4_ifval([$5], [yes], [no])])]) +]) + + +# _LT_TAGDECL([CONFIGNAME], VARNAME, VALUE, [DESCRIPTION]) +# -------------------------------------------------------- +m4_define([_LT_TAGDECL], [_LT_DECL([$1], [$2], [$3], [$4], [yes])]) + + +# lt_decl_tag_varnames([SEPARATOR], [VARNAME1...]) +# ------------------------------------------------ +m4_define([lt_decl_tag_varnames], +[_lt_decl_filter([tagged?], [yes], $@)]) + + +# _lt_decl_filter(SUBKEY, VALUE, [SEPARATOR], [VARNAME1..]) +# --------------------------------------------------------- +m4_define([_lt_decl_filter], +[m4_case([$#], + [0], [m4_fatal([$0: too few arguments: $#])], + [1], [m4_fatal([$0: too few arguments: $#: $1])], + [2], [lt_dict_filter([lt_decl_dict], [$1], [$2], [], lt_decl_varnames)], + [3], [lt_dict_filter([lt_decl_dict], [$1], [$2], [$3], lt_decl_varnames)], + [lt_dict_filter([lt_decl_dict], $@)])[]dnl +]) + + +# lt_decl_quote_varnames([SEPARATOR], [VARNAME1...]) +# -------------------------------------------------- +m4_define([lt_decl_quote_varnames], +[_lt_decl_filter([value], [1], $@)]) + + +# lt_decl_dquote_varnames([SEPARATOR], [VARNAME1...]) +# --------------------------------------------------- +m4_define([lt_decl_dquote_varnames], +[_lt_decl_filter([value], [2], $@)]) + + +# lt_decl_varnames_tagged([SEPARATOR], [VARNAME1...]) +# --------------------------------------------------- +m4_define([lt_decl_varnames_tagged], +[m4_assert([$# <= 2])dnl +_$0(m4_quote(m4_default([$1], [[, ]])), + m4_ifval([$2], [[$2]], [m4_dquote(lt_decl_tag_varnames)]), + m4_split(m4_normalize(m4_quote(_LT_TAGS)), [ ]))]) 
+m4_define([_lt_decl_varnames_tagged], +[m4_ifval([$3], [lt_combine([$1], [$2], [_], $3)])]) + + +# lt_decl_all_varnames([SEPARATOR], [VARNAME1...]) +# ------------------------------------------------ +m4_define([lt_decl_all_varnames], +[_$0(m4_quote(m4_default([$1], [[, ]])), + m4_if([$2], [], + m4_quote(lt_decl_varnames), + m4_quote(m4_shift($@))))[]dnl +]) +m4_define([_lt_decl_all_varnames], +[lt_join($@, lt_decl_varnames_tagged([$1], + lt_decl_tag_varnames([[, ]], m4_shift($@))))dnl +]) + + +# _LT_CONFIG_STATUS_DECLARE([VARNAME]) +# ------------------------------------ +# Quote a variable value, and forward it to `config.status' so that its +# declaration there will have the same value as in `configure'. VARNAME +# must have a single quote delimited value for this to work. +m4_define([_LT_CONFIG_STATUS_DECLARE], +[$1='`$ECHO "$][$1" | $SED "$delay_single_quote_subst"`']) + + +# _LT_CONFIG_STATUS_DECLARATIONS +# ------------------------------ +# We delimit libtool config variables with single quotes, so when +# we write them to config.status, we have to be sure to quote all +# embedded single quotes properly. In configure, this macro expands +# each variable declared with _LT_DECL (and _LT_TAGDECL) into: +# +# ='`$ECHO "$" | $SED "$delay_single_quote_subst"`' +m4_defun([_LT_CONFIG_STATUS_DECLARATIONS], +[m4_foreach([_lt_var], m4_quote(lt_decl_all_varnames), + [m4_n([_LT_CONFIG_STATUS_DECLARE(_lt_var)])])]) + + +# _LT_LIBTOOL_TAGS +# ---------------- +# Output comment and list of tags supported by the script +m4_defun([_LT_LIBTOOL_TAGS], +[_LT_FORMAT_COMMENT([The names of the tagged configurations supported by this script])dnl +available_tags="_LT_TAGS"dnl +]) + + +# _LT_LIBTOOL_DECLARE(VARNAME, [TAG]) +# ----------------------------------- +# Extract the dictionary values for VARNAME (optionally with TAG) and +# expand to a commented shell variable setting: +# +# # Some comment about what VAR is for. 
+# visible_name=$lt_internal_name +m4_define([_LT_LIBTOOL_DECLARE], +[_LT_FORMAT_COMMENT(m4_quote(lt_dict_fetch([lt_decl_dict], [$1], + [description])))[]dnl +m4_pushdef([_libtool_name], + m4_quote(lt_dict_fetch([lt_decl_dict], [$1], [libtool_name])))[]dnl +m4_case(m4_quote(lt_dict_fetch([lt_decl_dict], [$1], [value])), + [0], [_libtool_name=[$]$1], + [1], [_libtool_name=$lt_[]$1], + [2], [_libtool_name=$lt_[]$1], + [_libtool_name=lt_dict_fetch([lt_decl_dict], [$1], [value])])[]dnl +m4_ifval([$2], [_$2])[]m4_popdef([_libtool_name])[]dnl +]) + + +# _LT_LIBTOOL_CONFIG_VARS +# ----------------------- +# Produce commented declarations of non-tagged libtool config variables +# suitable for insertion in the LIBTOOL CONFIG section of the `libtool' +# script. Tagged libtool config variables (even for the LIBTOOL CONFIG +# section) are produced by _LT_LIBTOOL_TAG_VARS. +m4_defun([_LT_LIBTOOL_CONFIG_VARS], +[m4_foreach([_lt_var], + m4_quote(_lt_decl_filter([tagged?], [no], [], lt_decl_varnames)), + [m4_n([_LT_LIBTOOL_DECLARE(_lt_var)])])]) + + +# _LT_LIBTOOL_TAG_VARS(TAG) +# ------------------------- +m4_define([_LT_LIBTOOL_TAG_VARS], +[m4_foreach([_lt_var], m4_quote(lt_decl_tag_varnames), + [m4_n([_LT_LIBTOOL_DECLARE(_lt_var, [$1])])])]) + + +# _LT_TAGVAR(VARNAME, [TAGNAME]) +# ------------------------------ +m4_define([_LT_TAGVAR], [m4_ifval([$2], [$1_$2], [$1])]) + + +# _LT_CONFIG_COMMANDS +# ------------------- +# Send accumulated output to $CONFIG_STATUS. Thanks to the lists of +# variables for single and double quote escaping we saved from calls +# to _LT_DECL, we can put quote escaped variables declarations +# into `config.status', and then the shell code to quote escape them in +# for loops in `config.status'. Finally, any additional code accumulated +# from calls to _LT_CONFIG_LIBTOOL_INIT is expanded. 
+m4_defun([_LT_CONFIG_COMMANDS], +[AC_PROVIDE_IFELSE([LT_OUTPUT], + dnl If the libtool generation code has been placed in $CONFIG_LT, + dnl instead of duplicating it all over again into config.status, + dnl then we will have config.status run $CONFIG_LT later, so it + dnl needs to know what name is stored there: + [AC_CONFIG_COMMANDS([libtool], + [$SHELL $CONFIG_LT || AS_EXIT(1)], [CONFIG_LT='$CONFIG_LT'])], + dnl If the libtool generation code is destined for config.status, + dnl expand the accumulated commands and init code now: + [AC_CONFIG_COMMANDS([libtool], + [_LT_OUTPUT_LIBTOOL_COMMANDS], [_LT_OUTPUT_LIBTOOL_COMMANDS_INIT])]) +])#_LT_CONFIG_COMMANDS + + +# Initialize. +m4_define([_LT_OUTPUT_LIBTOOL_COMMANDS_INIT], +[ + +# The HP-UX ksh and POSIX shell print the target directory to stdout +# if CDPATH is set. +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + +sed_quote_subst='$sed_quote_subst' +double_quote_subst='$double_quote_subst' +delay_variable_subst='$delay_variable_subst' +_LT_CONFIG_STATUS_DECLARATIONS +LTCC='$LTCC' +LTCFLAGS='$LTCFLAGS' +compiler='$compiler_DEFAULT' + +# A function that is used when there is no print builtin or printf. +func_fallback_echo () +{ + eval 'cat <<_LTECHO_EOF +\$[]1 +_LTECHO_EOF' +} + +# Quote evaled strings. +for var in lt_decl_all_varnames([[ \ +]], lt_decl_quote_varnames); do + case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in + *[[\\\\\\\`\\"\\\$]]*) + eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED \\"\\\$sed_quote_subst\\"\\\`\\\\\\"" + ;; + *) + eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" + ;; + esac +done + +# Double-quote double-evaled strings. 
+for var in lt_decl_all_varnames([[ \ +]], lt_decl_dquote_varnames); do + case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in + *[[\\\\\\\`\\"\\\$]]*) + eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED -e \\"\\\$double_quote_subst\\" -e \\"\\\$sed_quote_subst\\" -e \\"\\\$delay_variable_subst\\"\\\`\\\\\\"" + ;; + *) + eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" + ;; + esac +done + +_LT_OUTPUT_LIBTOOL_INIT +]) + +# _LT_GENERATED_FILE_INIT(FILE, [COMMENT]) +# ------------------------------------ +# Generate a child script FILE with all initialization necessary to +# reuse the environment learned by the parent script, and make the +# file executable. If COMMENT is supplied, it is inserted after the +# `#!' sequence but before initialization text begins. After this +# macro, additional text can be appended to FILE to form the body of +# the child script. The macro ends with non-zero status if the +# file could not be fully written (such as if the disk is full). +m4_ifdef([AS_INIT_GENERATED], +[m4_defun([_LT_GENERATED_FILE_INIT],[AS_INIT_GENERATED($@)])], +[m4_defun([_LT_GENERATED_FILE_INIT], +[m4_require([AS_PREPARE])]dnl +[m4_pushdef([AS_MESSAGE_LOG_FD])]dnl +[lt_write_fail=0 +cat >$1 <<_ASEOF || lt_write_fail=1 +#! $SHELL +# Generated by $as_me. +$2 +SHELL=\${CONFIG_SHELL-$SHELL} +export SHELL +_ASEOF +cat >>$1 <<\_ASEOF || lt_write_fail=1 +AS_SHELL_SANITIZE +_AS_PREPARE +exec AS_MESSAGE_FD>&1 +_ASEOF +test $lt_write_fail = 0 && chmod +x $1[]dnl +m4_popdef([AS_MESSAGE_LOG_FD])])])# _LT_GENERATED_FILE_INIT + +# LT_OUTPUT +# --------- +# This macro allows early generation of the libtool script (before +# AC_OUTPUT is called), incase it is used in configure for compilation +# tests. 
+AC_DEFUN([LT_OUTPUT], +[: ${CONFIG_LT=./config.lt} +AC_MSG_NOTICE([creating $CONFIG_LT]) +_LT_GENERATED_FILE_INIT(["$CONFIG_LT"], +[# Run this file to recreate a libtool stub with the current configuration.]) + +cat >>"$CONFIG_LT" <<\_LTEOF +lt_cl_silent=false +exec AS_MESSAGE_LOG_FD>>config.log +{ + echo + AS_BOX([Running $as_me.]) +} >&AS_MESSAGE_LOG_FD + +lt_cl_help="\ +\`$as_me' creates a local libtool stub from the current configuration, +for use in further configure time tests before the real libtool is +generated. + +Usage: $[0] [[OPTIONS]] + + -h, --help print this help, then exit + -V, --version print version number, then exit + -q, --quiet do not print progress messages + -d, --debug don't remove temporary files + +Report bugs to ." + +lt_cl_version="\ +m4_ifset([AC_PACKAGE_NAME], [AC_PACKAGE_NAME ])config.lt[]dnl +m4_ifset([AC_PACKAGE_VERSION], [ AC_PACKAGE_VERSION]) +configured by $[0], generated by m4_PACKAGE_STRING. + +Copyright (C) 2011 Free Software Foundation, Inc. +This config.lt script is free software; the Free Software Foundation +gives unlimited permision to copy, distribute and modify it." 
+ +while test $[#] != 0 +do + case $[1] in + --version | --v* | -V ) + echo "$lt_cl_version"; exit 0 ;; + --help | --h* | -h ) + echo "$lt_cl_help"; exit 0 ;; + --debug | --d* | -d ) + debug=: ;; + --quiet | --q* | --silent | --s* | -q ) + lt_cl_silent=: ;; + + -*) AC_MSG_ERROR([unrecognized option: $[1] +Try \`$[0] --help' for more information.]) ;; + + *) AC_MSG_ERROR([unrecognized argument: $[1] +Try \`$[0] --help' for more information.]) ;; + esac + shift +done + +if $lt_cl_silent; then + exec AS_MESSAGE_FD>/dev/null +fi +_LTEOF + +cat >>"$CONFIG_LT" <<_LTEOF +_LT_OUTPUT_LIBTOOL_COMMANDS_INIT +_LTEOF + +cat >>"$CONFIG_LT" <<\_LTEOF +AC_MSG_NOTICE([creating $ofile]) +_LT_OUTPUT_LIBTOOL_COMMANDS +AS_EXIT(0) +_LTEOF +chmod +x "$CONFIG_LT" + +# configure is writing to config.log, but config.lt does its own redirection, +# appending to config.log, which fails on DOS, as config.log is still kept +# open by configure. Here we exec the FD to /dev/null, effectively closing +# config.log, so it can be properly (re)opened and appended to by config.lt. +lt_cl_success=: +test "$silent" = yes && + lt_config_lt_args="$lt_config_lt_args --quiet" +exec AS_MESSAGE_LOG_FD>/dev/null +$SHELL "$CONFIG_LT" $lt_config_lt_args || lt_cl_success=false +exec AS_MESSAGE_LOG_FD>>config.log +$lt_cl_success || AS_EXIT(1) +])# LT_OUTPUT + + +# _LT_CONFIG(TAG) +# --------------- +# If TAG is the built-in tag, create an initial libtool script with a +# default configuration from the untagged config vars. Otherwise add code +# to config.status for appending the configuration named by TAG from the +# matching tagged config vars. +m4_defun([_LT_CONFIG], +[m4_require([_LT_FILEUTILS_DEFAULTS])dnl +_LT_CONFIG_SAVE_COMMANDS([ + m4_define([_LT_TAG], m4_if([$1], [], [C], [$1]))dnl + m4_if(_LT_TAG, [C], [ + # See if we are running on zsh, and set the options which allow our + # commands through without removal of \ escapes. 
+ if test -n "${ZSH_VERSION+set}" ; then + setopt NO_GLOB_SUBST + fi + + cfgfile="${ofile}T" + trap "$RM \"$cfgfile\"; exit 1" 1 2 15 + $RM "$cfgfile" + + cat <<_LT_EOF >> "$cfgfile" +#! $SHELL + +# `$ECHO "$ofile" | sed 's%^.*/%%'` - Provide generalized library-building support services. +# Generated automatically by $as_me ($PACKAGE$TIMESTAMP) $VERSION +# Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`: +# NOTE: Changes made to this file will be lost: look at ltmain.sh. +# +_LT_COPYING +_LT_LIBTOOL_TAGS + +# ### BEGIN LIBTOOL CONFIG +_LT_LIBTOOL_CONFIG_VARS +_LT_LIBTOOL_TAG_VARS +# ### END LIBTOOL CONFIG + +_LT_EOF + + case $host_os in + aix3*) + cat <<\_LT_EOF >> "$cfgfile" +# AIX sometimes has problems with the GCC collect2 program. For some +# reason, if we set the COLLECT_NAMES environment variable, the problems +# vanish in a puff of smoke. +if test "X${COLLECT_NAMES+set}" != Xset; then + COLLECT_NAMES= + export COLLECT_NAMES +fi +_LT_EOF + ;; + esac + + _LT_PROG_LTMAIN + + # We use sed instead of cat because bash on DJGPP gets confused if + # if finds mixed CR/LF and LF-only lines. Since sed operates in + # text mode, it properly converts lines to CR/LF. This bash problem + # is reportedly fixed, but why not run on old versions too? + sed '$q' "$ltmain" >> "$cfgfile" \ + || (rm -f "$cfgfile"; exit 1) + + _LT_PROG_REPLACE_SHELLFNS + + mv -f "$cfgfile" "$ofile" || + (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") + chmod +x "$ofile" +], +[cat <<_LT_EOF >> "$ofile" + +dnl Unfortunately we have to use $1 here, since _LT_TAG is not expanded +dnl in a comment (ie after a #). 
+# ### BEGIN LIBTOOL TAG CONFIG: $1 +_LT_LIBTOOL_TAG_VARS(_LT_TAG) +# ### END LIBTOOL TAG CONFIG: $1 +_LT_EOF +])dnl /m4_if +], +[m4_if([$1], [], [ + PACKAGE='$PACKAGE' + VERSION='$VERSION' + TIMESTAMP='$TIMESTAMP' + RM='$RM' + ofile='$ofile'], []) +])dnl /_LT_CONFIG_SAVE_COMMANDS +])# _LT_CONFIG + + +# LT_SUPPORTED_TAG(TAG) +# --------------------- +# Trace this macro to discover what tags are supported by the libtool +# --tag option, using: +# autoconf --trace 'LT_SUPPORTED_TAG:$1' +AC_DEFUN([LT_SUPPORTED_TAG], []) + + +# C support is built-in for now +m4_define([_LT_LANG_C_enabled], []) +m4_define([_LT_TAGS], []) + + +# LT_LANG(LANG) +# ------------- +# Enable libtool support for the given language if not already enabled. +AC_DEFUN([LT_LANG], +[AC_BEFORE([$0], [LT_OUTPUT])dnl +m4_case([$1], + [C], [_LT_LANG(C)], + [C++], [_LT_LANG(CXX)], + [Go], [_LT_LANG(GO)], + [Java], [_LT_LANG(GCJ)], + [Fortran 77], [_LT_LANG(F77)], + [Fortran], [_LT_LANG(FC)], + [Windows Resource], [_LT_LANG(RC)], + [m4_ifdef([_LT_LANG_]$1[_CONFIG], + [_LT_LANG($1)], + [m4_fatal([$0: unsupported language: "$1"])])])dnl +])# LT_LANG + + +# _LT_LANG(LANGNAME) +# ------------------ +m4_defun([_LT_LANG], +[m4_ifdef([_LT_LANG_]$1[_enabled], [], + [LT_SUPPORTED_TAG([$1])dnl + m4_append([_LT_TAGS], [$1 ])dnl + m4_define([_LT_LANG_]$1[_enabled], [])dnl + _LT_LANG_$1_CONFIG($1)])dnl +])# _LT_LANG + + +m4_ifndef([AC_PROG_GO], [ +############################################################ +# NOTE: This macro has been submitted for inclusion into # +# GNU Autoconf as AC_PROG_GO. When it is available in # +# a released version of Autoconf we should remove this # +# macro and use it instead. 
# +############################################################ +m4_defun([AC_PROG_GO], +[AC_LANG_PUSH(Go)dnl +AC_ARG_VAR([GOC], [Go compiler command])dnl +AC_ARG_VAR([GOFLAGS], [Go compiler flags])dnl +_AC_ARG_VAR_LDFLAGS()dnl +AC_CHECK_TOOL(GOC, gccgo) +if test -z "$GOC"; then + if test -n "$ac_tool_prefix"; then + AC_CHECK_PROG(GOC, [${ac_tool_prefix}gccgo], [${ac_tool_prefix}gccgo]) + fi +fi +if test -z "$GOC"; then + AC_CHECK_PROG(GOC, gccgo, gccgo, false) +fi +])#m4_defun +])#m4_ifndef + + +# _LT_LANG_DEFAULT_CONFIG +# ----------------------- +m4_defun([_LT_LANG_DEFAULT_CONFIG], +[AC_PROVIDE_IFELSE([AC_PROG_CXX], + [LT_LANG(CXX)], + [m4_define([AC_PROG_CXX], defn([AC_PROG_CXX])[LT_LANG(CXX)])]) + +AC_PROVIDE_IFELSE([AC_PROG_F77], + [LT_LANG(F77)], + [m4_define([AC_PROG_F77], defn([AC_PROG_F77])[LT_LANG(F77)])]) + +AC_PROVIDE_IFELSE([AC_PROG_FC], + [LT_LANG(FC)], + [m4_define([AC_PROG_FC], defn([AC_PROG_FC])[LT_LANG(FC)])]) + +dnl The call to [A][M_PROG_GCJ] is quoted like that to stop aclocal +dnl pulling things in needlessly. 
+AC_PROVIDE_IFELSE([AC_PROG_GCJ], + [LT_LANG(GCJ)], + [AC_PROVIDE_IFELSE([A][M_PROG_GCJ], + [LT_LANG(GCJ)], + [AC_PROVIDE_IFELSE([LT_PROG_GCJ], + [LT_LANG(GCJ)], + [m4_ifdef([AC_PROG_GCJ], + [m4_define([AC_PROG_GCJ], defn([AC_PROG_GCJ])[LT_LANG(GCJ)])]) + m4_ifdef([A][M_PROG_GCJ], + [m4_define([A][M_PROG_GCJ], defn([A][M_PROG_GCJ])[LT_LANG(GCJ)])]) + m4_ifdef([LT_PROG_GCJ], + [m4_define([LT_PROG_GCJ], defn([LT_PROG_GCJ])[LT_LANG(GCJ)])])])])]) + +AC_PROVIDE_IFELSE([AC_PROG_GO], + [LT_LANG(GO)], + [m4_define([AC_PROG_GO], defn([AC_PROG_GO])[LT_LANG(GO)])]) + +AC_PROVIDE_IFELSE([LT_PROG_RC], + [LT_LANG(RC)], + [m4_define([LT_PROG_RC], defn([LT_PROG_RC])[LT_LANG(RC)])]) +])# _LT_LANG_DEFAULT_CONFIG + +# Obsolete macros: +AU_DEFUN([AC_LIBTOOL_CXX], [LT_LANG(C++)]) +AU_DEFUN([AC_LIBTOOL_F77], [LT_LANG(Fortran 77)]) +AU_DEFUN([AC_LIBTOOL_FC], [LT_LANG(Fortran)]) +AU_DEFUN([AC_LIBTOOL_GCJ], [LT_LANG(Java)]) +AU_DEFUN([AC_LIBTOOL_RC], [LT_LANG(Windows Resource)]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_LIBTOOL_CXX], []) +dnl AC_DEFUN([AC_LIBTOOL_F77], []) +dnl AC_DEFUN([AC_LIBTOOL_FC], []) +dnl AC_DEFUN([AC_LIBTOOL_GCJ], []) +dnl AC_DEFUN([AC_LIBTOOL_RC], []) + + +# _LT_TAG_COMPILER +# ---------------- +m4_defun([_LT_TAG_COMPILER], +[AC_REQUIRE([AC_PROG_CC])dnl + +_LT_DECL([LTCC], [CC], [1], [A C compiler])dnl +_LT_DECL([LTCFLAGS], [CFLAGS], [1], [LTCC compiler flags])dnl +_LT_TAGDECL([CC], [compiler], [1], [A language specific compiler])dnl +_LT_TAGDECL([with_gcc], [GCC], [0], [Is the compiler the GNU compiler?])dnl + +# If no C compiler was specified, use CC. +LTCC=${LTCC-"$CC"} + +# If no C compiler flags were specified, use CFLAGS. +LTCFLAGS=${LTCFLAGS-"$CFLAGS"} + +# Allow CC to be a program name with arguments. +compiler=$CC +])# _LT_TAG_COMPILER + + +# _LT_COMPILER_BOILERPLATE +# ------------------------ +# Check for compiler boilerplate output or warnings with +# the simple compiler test code. 
+m4_defun([_LT_COMPILER_BOILERPLATE], +[m4_require([_LT_DECL_SED])dnl +ac_outfile=conftest.$ac_objext +echo "$lt_simple_compile_test_code" >conftest.$ac_ext +eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_compiler_boilerplate=`cat conftest.err` +$RM conftest* +])# _LT_COMPILER_BOILERPLATE + + +# _LT_LINKER_BOILERPLATE +# ---------------------- +# Check for linker boilerplate output or warnings with +# the simple link test code. +m4_defun([_LT_LINKER_BOILERPLATE], +[m4_require([_LT_DECL_SED])dnl +ac_outfile=conftest.$ac_objext +echo "$lt_simple_link_test_code" >conftest.$ac_ext +eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_linker_boilerplate=`cat conftest.err` +$RM -r conftest* +])# _LT_LINKER_BOILERPLATE + +# _LT_REQUIRED_DARWIN_CHECKS +# ------------------------- +m4_defun_once([_LT_REQUIRED_DARWIN_CHECKS],[ + case $host_os in + rhapsody* | darwin*) + AC_CHECK_TOOL([DSYMUTIL], [dsymutil], [:]) + AC_CHECK_TOOL([NMEDIT], [nmedit], [:]) + AC_CHECK_TOOL([LIPO], [lipo], [:]) + AC_CHECK_TOOL([OTOOL], [otool], [:]) + AC_CHECK_TOOL([OTOOL64], [otool64], [:]) + _LT_DECL([], [DSYMUTIL], [1], + [Tool to manipulate archived DWARF debug symbol files on Mac OS X]) + _LT_DECL([], [NMEDIT], [1], + [Tool to change global to local symbols on Mac OS X]) + _LT_DECL([], [LIPO], [1], + [Tool to manipulate fat objects and archives on Mac OS X]) + _LT_DECL([], [OTOOL], [1], + [ldd/readelf like tool for Mach-O binaries on Mac OS X]) + _LT_DECL([], [OTOOL64], [1], + [ldd/readelf like tool for 64 bit Mach-O binaries on Mac OS X 10.4]) + + AC_CACHE_CHECK([for -single_module linker flag],[lt_cv_apple_cc_single_mod], + [lt_cv_apple_cc_single_mod=no + if test -z "${LT_MULTI_MODULE}"; then + # By default we will add the -single_module flag. You can override + # by either setting the environment variable LT_MULTI_MODULE + # non-empty at configure time, or by adding -multi_module to the + # link flags. 
+ rm -rf libconftest.dylib* + echo "int foo(void){return 1;}" > conftest.c + echo "$LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ +-dynamiclib -Wl,-single_module conftest.c" >&AS_MESSAGE_LOG_FD + $LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ + -dynamiclib -Wl,-single_module conftest.c 2>conftest.err + _lt_result=$? + # If there is a non-empty error log, and "single_module" + # appears in it, assume the flag caused a linker warning + if test -s conftest.err && $GREP single_module conftest.err; then + cat conftest.err >&AS_MESSAGE_LOG_FD + # Otherwise, if the output was created with a 0 exit code from + # the compiler, it worked. + elif test -f libconftest.dylib && test $_lt_result -eq 0; then + lt_cv_apple_cc_single_mod=yes + else + cat conftest.err >&AS_MESSAGE_LOG_FD + fi + rm -rf libconftest.dylib* + rm -f conftest.* + fi]) + + AC_CACHE_CHECK([for -exported_symbols_list linker flag], + [lt_cv_ld_exported_symbols_list], + [lt_cv_ld_exported_symbols_list=no + save_LDFLAGS=$LDFLAGS + echo "_main" > conftest.sym + LDFLAGS="$LDFLAGS -Wl,-exported_symbols_list,conftest.sym" + AC_LINK_IFELSE([AC_LANG_PROGRAM([],[])], + [lt_cv_ld_exported_symbols_list=yes], + [lt_cv_ld_exported_symbols_list=no]) + LDFLAGS="$save_LDFLAGS" + ]) + + AC_CACHE_CHECK([for -force_load linker flag],[lt_cv_ld_force_load], + [lt_cv_ld_force_load=no + cat > conftest.c << _LT_EOF +int forced_loaded() { return 2;} +_LT_EOF + echo "$LTCC $LTCFLAGS -c -o conftest.o conftest.c" >&AS_MESSAGE_LOG_FD + $LTCC $LTCFLAGS -c -o conftest.o conftest.c 2>&AS_MESSAGE_LOG_FD + echo "$AR cru libconftest.a conftest.o" >&AS_MESSAGE_LOG_FD + $AR cru libconftest.a conftest.o 2>&AS_MESSAGE_LOG_FD + echo "$RANLIB libconftest.a" >&AS_MESSAGE_LOG_FD + $RANLIB libconftest.a 2>&AS_MESSAGE_LOG_FD + cat > conftest.c << _LT_EOF +int main() { return 0;} +_LT_EOF + echo "$LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a" >&AS_MESSAGE_LOG_FD + $LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c 
-Wl,-force_load,./libconftest.a 2>conftest.err + _lt_result=$? + if test -s conftest.err && $GREP force_load conftest.err; then + cat conftest.err >&AS_MESSAGE_LOG_FD + elif test -f conftest && test $_lt_result -eq 0 && $GREP forced_load conftest >/dev/null 2>&1 ; then + lt_cv_ld_force_load=yes + else + cat conftest.err >&AS_MESSAGE_LOG_FD + fi + rm -f conftest.err libconftest.a conftest conftest.c + rm -rf conftest.dSYM + ]) + case $host_os in + rhapsody* | darwin1.[[012]]) + _lt_dar_allow_undefined='${wl}-undefined ${wl}suppress' ;; + darwin1.*) + _lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' ;; + darwin*) # darwin 5.x on + # if running on 10.5 or later, the deployment target defaults + # to the OS version, if on x86, and 10.4, the deployment + # target defaults to 10.4. Don't you love it? + case ${MACOSX_DEPLOYMENT_TARGET-10.0},$host in + 10.0,*86*-darwin8*|10.0,*-darwin[[91]]*) + _lt_dar_allow_undefined='${wl}-undefined ${wl}dynamic_lookup' ;; + 10.[[012]]*) + _lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' ;; + 10.*) + _lt_dar_allow_undefined='${wl}-undefined ${wl}dynamic_lookup' ;; + esac + ;; + esac + if test "$lt_cv_apple_cc_single_mod" = "yes"; then + _lt_dar_single_mod='$single_module' + fi + if test "$lt_cv_ld_exported_symbols_list" = "yes"; then + _lt_dar_export_syms=' ${wl}-exported_symbols_list,$output_objdir/${libname}-symbols.expsym' + else + _lt_dar_export_syms='~$NMEDIT -s $output_objdir/${libname}-symbols.expsym ${lib}' + fi + if test "$DSYMUTIL" != ":" && test "$lt_cv_ld_force_load" = "no"; then + _lt_dsymutil='~$DSYMUTIL $lib || :' + else + _lt_dsymutil= + fi + ;; + esac +]) + + +# _LT_DARWIN_LINKER_FEATURES([TAG]) +# --------------------------------- +# Checks for linker and compiler features on darwin +m4_defun([_LT_DARWIN_LINKER_FEATURES], +[ + m4_require([_LT_REQUIRED_DARWIN_CHECKS]) + _LT_TAGVAR(archive_cmds_need_lc, $1)=no + _LT_TAGVAR(hardcode_direct, $1)=no + 
_LT_TAGVAR(hardcode_automatic, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported + if test "$lt_cv_ld_force_load" = "yes"; then + _LT_TAGVAR(whole_archive_flag_spec, $1)='`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience ${wl}-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`' + m4_case([$1], [F77], [_LT_TAGVAR(compiler_needs_object, $1)=yes], + [FC], [_LT_TAGVAR(compiler_needs_object, $1)=yes]) + else + _LT_TAGVAR(whole_archive_flag_spec, $1)='' + fi + _LT_TAGVAR(link_all_deplibs, $1)=yes + _LT_TAGVAR(allow_undefined_flag, $1)="$_lt_dar_allow_undefined" + case $cc_basename in + ifort*) _lt_dar_can_shared=yes ;; + *) _lt_dar_can_shared=$GCC ;; + esac + if test "$_lt_dar_can_shared" = "yes"; then + output_verbose_link_cmd=func_echo_all + _LT_TAGVAR(archive_cmds, $1)="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod${_lt_dsymutil}" + _LT_TAGVAR(module_cmds, $1)="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dsymutil}" + _LT_TAGVAR(archive_expsym_cmds, $1)="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring ${_lt_dar_single_mod}${_lt_dar_export_syms}${_lt_dsymutil}" + _LT_TAGVAR(module_expsym_cmds, $1)="sed -e 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dar_export_syms}${_lt_dsymutil}" + m4_if([$1], [CXX], +[ if test "$lt_cv_apple_cc_single_mod" != "yes"; then + _LT_TAGVAR(archive_cmds, $1)="\$CC -r -keep_private_externs -nostdlib -o \${lib}-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \${lib}-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname 
\$verstring${_lt_dsymutil}" + _LT_TAGVAR(archive_expsym_cmds, $1)="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -r -keep_private_externs -nostdlib -o \${lib}-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \${lib}-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring${_lt_dar_export_syms}${_lt_dsymutil}" + fi +],[]) + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi +]) + +# _LT_SYS_MODULE_PATH_AIX([TAGNAME]) +# ---------------------------------- +# Links a minimal program and checks the executable +# for the system default hardcoded library path. In most cases, +# this is /usr/lib:/lib, but when the MPI compilers are used +# the location of the communication and MPI libs are included too. +# If we don't find anything, use the default library path according +# to the aix ld manual. +# Store the results from the different compilers for each TAGNAME. +# Allow to override them for all tags through lt_cv_aix_libpath. +m4_defun([_LT_SYS_MODULE_PATH_AIX], +[m4_require([_LT_DECL_SED])dnl +if test "${lt_cv_aix_libpath+set}" = set; then + aix_libpath=$lt_cv_aix_libpath +else + AC_CACHE_VAL([_LT_TAGVAR([lt_cv_aix_libpath_], [$1])], + [AC_LINK_IFELSE([AC_LANG_PROGRAM],[ + lt_aix_libpath_sed='[ + /Import File Strings/,/^$/ { + /^0/ { + s/^0 *\([^ ]*\) *$/\1/ + p + } + }]' + _LT_TAGVAR([lt_cv_aix_libpath_], [$1])=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + # Check for a 64-bit object if we didn't find anything. 
+ if test -z "$_LT_TAGVAR([lt_cv_aix_libpath_], [$1])"; then + _LT_TAGVAR([lt_cv_aix_libpath_], [$1])=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + fi],[]) + if test -z "$_LT_TAGVAR([lt_cv_aix_libpath_], [$1])"; then + _LT_TAGVAR([lt_cv_aix_libpath_], [$1])="/usr/lib:/lib" + fi + ]) + aix_libpath=$_LT_TAGVAR([lt_cv_aix_libpath_], [$1]) +fi +])# _LT_SYS_MODULE_PATH_AIX + + +# _LT_SHELL_INIT(ARG) +# ------------------- +m4_define([_LT_SHELL_INIT], +[m4_divert_text([M4SH-INIT], [$1 +])])# _LT_SHELL_INIT + + + +# _LT_PROG_ECHO_BACKSLASH +# ----------------------- +# Find how we can fake an echo command that does not interpret backslash. +# In particular, with Autoconf 2.60 or later we add some code to the start +# of the generated configure script which will find a shell with a builtin +# printf (which we can use as an echo command). +m4_defun([_LT_PROG_ECHO_BACKSLASH], +[ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' +ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO +ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO + +AC_MSG_CHECKING([how to print strings]) +# Test print first, because it will be a builtin if present. +if test "X`( print -r -- -n ) 2>/dev/null`" = X-n && \ + test "X`print -r -- $ECHO 2>/dev/null`" = "X$ECHO"; then + ECHO='print -r --' +elif test "X`printf %s $ECHO 2>/dev/null`" = "X$ECHO"; then + ECHO='printf %s\n' +else + # Use this function as a fallback that always works. + func_fallback_echo () + { + eval 'cat <<_LTECHO_EOF +$[]1 +_LTECHO_EOF' + } + ECHO='func_fallback_echo' +fi + +# func_echo_all arg... +# Invoke $ECHO with all args, space-separated. 
+func_echo_all () +{ + $ECHO "$*" +} + +case "$ECHO" in + printf*) AC_MSG_RESULT([printf]) ;; + print*) AC_MSG_RESULT([print -r]) ;; + *) AC_MSG_RESULT([cat]) ;; +esac + +m4_ifdef([_AS_DETECT_SUGGESTED], +[_AS_DETECT_SUGGESTED([ + test -n "${ZSH_VERSION+set}${BASH_VERSION+set}" || ( + ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' + ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO + ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO + PATH=/empty FPATH=/empty; export PATH FPATH + test "X`printf %s $ECHO`" = "X$ECHO" \ + || test "X`print -r -- $ECHO`" = "X$ECHO" )])]) + +_LT_DECL([], [SHELL], [1], [Shell to use when invoking shell scripts]) +_LT_DECL([], [ECHO], [1], [An echo program that protects backslashes]) +])# _LT_PROG_ECHO_BACKSLASH + + +# _LT_WITH_SYSROOT +# ---------------- +AC_DEFUN([_LT_WITH_SYSROOT], +[AC_MSG_CHECKING([for sysroot]) +AC_ARG_WITH([sysroot], +[ --with-sysroot[=DIR] Search for dependent libraries within DIR + (or the compiler's sysroot if not specified).], +[], [with_sysroot=no]) + +dnl lt_sysroot will always be passed unquoted. We quote it here +dnl in case the user passed a directory name. 
+lt_sysroot= +case ${with_sysroot} in #( + yes) + if test "$GCC" = yes; then + lt_sysroot=`$CC --print-sysroot 2>/dev/null` + fi + ;; #( + /*) + lt_sysroot=`echo "$with_sysroot" | sed -e "$sed_quote_subst"` + ;; #( + no|'') + ;; #( + *) + AC_MSG_RESULT([${with_sysroot}]) + AC_MSG_ERROR([The sysroot must be an absolute path.]) + ;; +esac + + AC_MSG_RESULT([${lt_sysroot:-no}]) +_LT_DECL([], [lt_sysroot], [0], [The root where to search for ]dnl +[dependent libraries, and in which our libraries should be installed.])]) + +# _LT_ENABLE_LOCK +# --------------- +m4_defun([_LT_ENABLE_LOCK], +[AC_ARG_ENABLE([libtool-lock], + [AS_HELP_STRING([--disable-libtool-lock], + [avoid locking (might break parallel builds)])]) +test "x$enable_libtool_lock" != xno && enable_libtool_lock=yes + +# Some flags need to be propagated to the compiler or linker for good +# libtool support. +case $host in +ia64-*-hpux*) + # Find out which ABI we are using. + echo 'int i;' > conftest.$ac_ext + if AC_TRY_EVAL(ac_compile); then + case `/usr/bin/file conftest.$ac_objext` in + *ELF-32*) + HPUX_IA64_MODE="32" + ;; + *ELF-64*) + HPUX_IA64_MODE="64" + ;; + esac + fi + rm -rf conftest* + ;; +*-*-irix6*) + # Find out which ABI we are using. + echo '[#]line '$LINENO' "configure"' > conftest.$ac_ext + if AC_TRY_EVAL(ac_compile); then + if test "$lt_cv_prog_gnu_ld" = yes; then + case `/usr/bin/file conftest.$ac_objext` in + *32-bit*) + LD="${LD-ld} -melf32bsmip" + ;; + *N32*) + LD="${LD-ld} -melf32bmipn32" + ;; + *64-bit*) + LD="${LD-ld} -melf64bmip" + ;; + esac + else + case `/usr/bin/file conftest.$ac_objext` in + *32-bit*) + LD="${LD-ld} -32" + ;; + *N32*) + LD="${LD-ld} -n32" + ;; + *64-bit*) + LD="${LD-ld} -64" + ;; + esac + fi + fi + rm -rf conftest* + ;; + +x86_64-*kfreebsd*-gnu|x86_64-*linux*|powerpc*-*linux*| \ +s390*-*linux*|s390*-*tpf*|sparc*-*linux*) + # Find out which ABI we are using. 
+ echo 'int i;' > conftest.$ac_ext + if AC_TRY_EVAL(ac_compile); then + case `/usr/bin/file conftest.o` in + *32-bit*) + case $host in + x86_64-*kfreebsd*-gnu) + LD="${LD-ld} -m elf_i386_fbsd" + ;; + x86_64-*linux*) + LD="${LD-ld} -m elf_i386" + ;; + powerpc64le-*linux*) + LD="${LD-ld} -m elf32lppclinux" + ;; + powerpc64-*linux*) + LD="${LD-ld} -m elf32ppclinux" + ;; + s390x-*linux*) + LD="${LD-ld} -m elf_s390" + ;; + sparc64-*linux*) + LD="${LD-ld} -m elf32_sparc" + ;; + esac + ;; + *64-bit*) + case $host in + x86_64-*kfreebsd*-gnu) + LD="${LD-ld} -m elf_x86_64_fbsd" + ;; + x86_64-*linux*) + LD="${LD-ld} -m elf_x86_64" + ;; + powerpcle-*linux*) + LD="${LD-ld} -m elf64lppc" + ;; + powerpc-*linux*) + LD="${LD-ld} -m elf64ppc" + ;; + s390*-*linux*|s390*-*tpf*) + LD="${LD-ld} -m elf64_s390" + ;; + sparc*-*linux*) + LD="${LD-ld} -m elf64_sparc" + ;; + esac + ;; + esac + fi + rm -rf conftest* + ;; + +*-*-sco3.2v5*) + # On SCO OpenServer 5, we need -belf to get full-featured binaries. + SAVE_CFLAGS="$CFLAGS" + CFLAGS="$CFLAGS -belf" + AC_CACHE_CHECK([whether the C compiler needs -belf], lt_cv_cc_needs_belf, + [AC_LANG_PUSH(C) + AC_LINK_IFELSE([AC_LANG_PROGRAM([[]],[[]])],[lt_cv_cc_needs_belf=yes],[lt_cv_cc_needs_belf=no]) + AC_LANG_POP]) + if test x"$lt_cv_cc_needs_belf" != x"yes"; then + # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf + CFLAGS="$SAVE_CFLAGS" + fi + ;; +*-*solaris*) + # Find out which ABI we are using. + echo 'int i;' > conftest.$ac_ext + if AC_TRY_EVAL(ac_compile); then + case `/usr/bin/file conftest.o` in + *64-bit*) + case $lt_cv_prog_gnu_ld in + yes*) + case $host in + i?86-*-solaris*) + LD="${LD-ld} -m elf_x86_64" + ;; + sparc*-*-solaris*) + LD="${LD-ld} -m elf64_sparc" + ;; + esac + # GNU ld 2.21 introduced _sol2 emulations. Use them if available. 
+ if ${LD-ld} -V | grep _sol2 >/dev/null 2>&1; then + LD="${LD-ld}_sol2" + fi + ;; + *) + if ${LD-ld} -64 -r -o conftest2.o conftest.o >/dev/null 2>&1; then + LD="${LD-ld} -64" + fi + ;; + esac + ;; + esac + fi + rm -rf conftest* + ;; +esac + +need_locks="$enable_libtool_lock" +])# _LT_ENABLE_LOCK + + +# _LT_PROG_AR +# ----------- +m4_defun([_LT_PROG_AR], +[AC_CHECK_TOOLS(AR, [ar], false) +: ${AR=ar} +: ${AR_FLAGS=cru} +_LT_DECL([], [AR], [1], [The archiver]) +_LT_DECL([], [AR_FLAGS], [1], [Flags to create an archive]) + +AC_CACHE_CHECK([for archiver @FILE support], [lt_cv_ar_at_file], + [lt_cv_ar_at_file=no + AC_COMPILE_IFELSE([AC_LANG_PROGRAM], + [echo conftest.$ac_objext > conftest.lst + lt_ar_try='$AR $AR_FLAGS libconftest.a @conftest.lst >&AS_MESSAGE_LOG_FD' + AC_TRY_EVAL([lt_ar_try]) + if test "$ac_status" -eq 0; then + # Ensure the archiver fails upon bogus file names. + rm -f conftest.$ac_objext libconftest.a + AC_TRY_EVAL([lt_ar_try]) + if test "$ac_status" -ne 0; then + lt_cv_ar_at_file=@ + fi + fi + rm -f conftest.* libconftest.a + ]) + ]) + +if test "x$lt_cv_ar_at_file" = xno; then + archiver_list_spec= +else + archiver_list_spec=$lt_cv_ar_at_file +fi +_LT_DECL([], [archiver_list_spec], [1], + [How to feed a file listing to the archiver]) +])# _LT_PROG_AR + + +# _LT_CMD_OLD_ARCHIVE +# ------------------- +m4_defun([_LT_CMD_OLD_ARCHIVE], +[_LT_PROG_AR + +AC_CHECK_TOOL(STRIP, strip, :) +test -z "$STRIP" && STRIP=: +_LT_DECL([], [STRIP], [1], [A symbol stripping program]) + +AC_CHECK_TOOL(RANLIB, ranlib, :) +test -z "$RANLIB" && RANLIB=: +_LT_DECL([], [RANLIB], [1], + [Commands used to install an old-style archive]) + +# Determine commands to create old-style static archives. 
+old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs' +old_postinstall_cmds='chmod 644 $oldlib' +old_postuninstall_cmds= + +if test -n "$RANLIB"; then + case $host_os in + openbsd*) + old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB -t \$tool_oldlib" + ;; + *) + old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB \$tool_oldlib" + ;; + esac + old_archive_cmds="$old_archive_cmds~\$RANLIB \$tool_oldlib" +fi + +case $host_os in + darwin*) + lock_old_archive_extraction=yes ;; + *) + lock_old_archive_extraction=no ;; +esac +_LT_DECL([], [old_postinstall_cmds], [2]) +_LT_DECL([], [old_postuninstall_cmds], [2]) +_LT_TAGDECL([], [old_archive_cmds], [2], + [Commands used to build an old-style archive]) +_LT_DECL([], [lock_old_archive_extraction], [0], + [Whether to use a lock for old archive extraction]) +])# _LT_CMD_OLD_ARCHIVE + + +# _LT_COMPILER_OPTION(MESSAGE, VARIABLE-NAME, FLAGS, +# [OUTPUT-FILE], [ACTION-SUCCESS], [ACTION-FAILURE]) +# ---------------------------------------------------------------- +# Check whether the given compiler option works +AC_DEFUN([_LT_COMPILER_OPTION], +[m4_require([_LT_FILEUTILS_DEFAULTS])dnl +m4_require([_LT_DECL_SED])dnl +AC_CACHE_CHECK([$1], [$2], + [$2=no + m4_if([$4], , [ac_outfile=conftest.$ac_objext], [ac_outfile=$4]) + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + lt_compiler_flag="$3" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + # The option is referenced via a variable to avoid confusing sed. 
+ lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [[^ ]]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&AS_MESSAGE_LOG_FD) + (eval "$lt_compile" 2>conftest.err) + ac_status=$? + cat conftest.err >&AS_MESSAGE_LOG_FD + echo "$as_me:$LINENO: \$? = $ac_status" >&AS_MESSAGE_LOG_FD + if (exit $ac_status) && test -s "$ac_outfile"; then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings other than the usual output. + $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then + $2=yes + fi + fi + $RM conftest* +]) + +if test x"[$]$2" = xyes; then + m4_if([$5], , :, [$5]) +else + m4_if([$6], , :, [$6]) +fi +])# _LT_COMPILER_OPTION + +# Old name: +AU_ALIAS([AC_LIBTOOL_COMPILER_OPTION], [_LT_COMPILER_OPTION]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_LIBTOOL_COMPILER_OPTION], []) + + +# _LT_LINKER_OPTION(MESSAGE, VARIABLE-NAME, FLAGS, +# [ACTION-SUCCESS], [ACTION-FAILURE]) +# ---------------------------------------------------- +# Check whether the given linker option works +AC_DEFUN([_LT_LINKER_OPTION], +[m4_require([_LT_FILEUTILS_DEFAULTS])dnl +m4_require([_LT_DECL_SED])dnl +AC_CACHE_CHECK([$1], [$2], + [$2=no + save_LDFLAGS="$LDFLAGS" + LDFLAGS="$LDFLAGS $3" + echo "$lt_simple_link_test_code" > conftest.$ac_ext + if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then + # The linker can only warn and ignore the option if not recognized + # So say no if there are warnings + if test -s conftest.err; then + # Append any errors to the config.log. 
+ cat conftest.err 1>&AS_MESSAGE_LOG_FD + $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if diff conftest.exp conftest.er2 >/dev/null; then + $2=yes + fi + else + $2=yes + fi + fi + $RM -r conftest* + LDFLAGS="$save_LDFLAGS" +]) + +if test x"[$]$2" = xyes; then + m4_if([$4], , :, [$4]) +else + m4_if([$5], , :, [$5]) +fi +])# _LT_LINKER_OPTION + +# Old name: +AU_ALIAS([AC_LIBTOOL_LINKER_OPTION], [_LT_LINKER_OPTION]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_LIBTOOL_LINKER_OPTION], []) + + +# LT_CMD_MAX_LEN +#--------------- +AC_DEFUN([LT_CMD_MAX_LEN], +[AC_REQUIRE([AC_CANONICAL_HOST])dnl +# find the maximum length of command line arguments +AC_MSG_CHECKING([the maximum length of command line arguments]) +AC_CACHE_VAL([lt_cv_sys_max_cmd_len], [dnl + i=0 + teststring="ABCD" + + case $build_os in + msdosdjgpp*) + # On DJGPP, this test can blow up pretty badly due to problems in libc + # (any single argument exceeding 2000 bytes causes a buffer overrun + # during glob expansion). Even if it were fixed, the result of this + # check would be larger than it should be. + lt_cv_sys_max_cmd_len=12288; # 12K is about right + ;; + + gnu*) + # Under GNU Hurd, this test is not required because there is + # no limit to the length of command line arguments. + # Libtool will interpret -1 as no limit whatsoever + lt_cv_sys_max_cmd_len=-1; + ;; + + cygwin* | mingw* | cegcc*) + # On Win9x/ME, this test blows up -- it succeeds, but takes + # about 5 minutes as the teststring grows exponentially. + # Worse, since 9x/ME are not pre-emptively multitasking, + # you end up with a "frozen" computer, even though with patience + # the test eventually succeeds (with a max line length of 256k). + # Instead, let's just punt: use the minimum linelength reported by + # all of the supported platforms: 8192 (on NT/2K/XP). 
+ lt_cv_sys_max_cmd_len=8192; + ;; + + mint*) + # On MiNT this can take a long time and run out of memory. + lt_cv_sys_max_cmd_len=8192; + ;; + + amigaos*) + # On AmigaOS with pdksh, this test takes hours, literally. + # So we just punt and use a minimum line length of 8192. + lt_cv_sys_max_cmd_len=8192; + ;; + + netbsd* | freebsd* | openbsd* | darwin* | dragonfly*) + # This has been around since 386BSD, at least. Likely further. + if test -x /sbin/sysctl; then + lt_cv_sys_max_cmd_len=`/sbin/sysctl -n kern.argmax` + elif test -x /usr/sbin/sysctl; then + lt_cv_sys_max_cmd_len=`/usr/sbin/sysctl -n kern.argmax` + else + lt_cv_sys_max_cmd_len=65536 # usable default for all BSDs + fi + # And add a safety zone + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` + ;; + + interix*) + # We know the value 262144 and hardcode it with a safety zone (like BSD) + lt_cv_sys_max_cmd_len=196608 + ;; + + os2*) + # The test takes a long time on OS/2. + lt_cv_sys_max_cmd_len=8192 + ;; + + osf*) + # Dr. Hans Ekkehard Plesser reports seeing a kernel panic running configure + # due to this test when exec_disable_arg_limit is 1 on Tru64. It is not + # nice to cause kernel panics so lets avoid the loop below. + # First set a reasonable default. 
+ lt_cv_sys_max_cmd_len=16384 + # + if test -x /sbin/sysconfig; then + case `/sbin/sysconfig -q proc exec_disable_arg_limit` in + *1*) lt_cv_sys_max_cmd_len=-1 ;; + esac + fi + ;; + sco3.2v5*) + lt_cv_sys_max_cmd_len=102400 + ;; + sysv5* | sco5v6* | sysv4.2uw2*) + kargmax=`grep ARG_MAX /etc/conf/cf.d/stune 2>/dev/null` + if test -n "$kargmax"; then + lt_cv_sys_max_cmd_len=`echo $kargmax | sed 's/.*[[ ]]//'` + else + lt_cv_sys_max_cmd_len=32768 + fi + ;; + *) + lt_cv_sys_max_cmd_len=`(getconf ARG_MAX) 2> /dev/null` + if test -n "$lt_cv_sys_max_cmd_len"; then + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` + else + # Make teststring a little bigger before we do anything with it. + # a 1K string should be a reasonable start. + for i in 1 2 3 4 5 6 7 8 ; do + teststring=$teststring$teststring + done + SHELL=${SHELL-${CONFIG_SHELL-/bin/sh}} + # If test is not a shell built-in, we'll probably end up computing a + # maximum length that is only half of the actual maximum length, but + # we can't tell. + while { test "X"`env echo "$teststring$teststring" 2>/dev/null` \ + = "X$teststring$teststring"; } >/dev/null 2>&1 && + test $i != 17 # 1/2 MB should be enough + do + i=`expr $i + 1` + teststring=$teststring$teststring + done + # Only check the string length outside the loop. + lt_cv_sys_max_cmd_len=`expr "X$teststring" : ".*" 2>&1` + teststring= + # Add a significant safety factor because C++ compilers can tack on + # massive amounts of additional arguments before passing them to the + # linker. It appears as though 1/2 is a usable value. 
+ lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 2` + fi + ;; + esac +]) +if test -n $lt_cv_sys_max_cmd_len ; then + AC_MSG_RESULT($lt_cv_sys_max_cmd_len) +else + AC_MSG_RESULT(none) +fi +max_cmd_len=$lt_cv_sys_max_cmd_len +_LT_DECL([], [max_cmd_len], [0], + [What is the maximum length of a command?]) +])# LT_CMD_MAX_LEN + +# Old name: +AU_ALIAS([AC_LIBTOOL_SYS_MAX_CMD_LEN], [LT_CMD_MAX_LEN]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_LIBTOOL_SYS_MAX_CMD_LEN], []) + + +# _LT_HEADER_DLFCN +# ---------------- +m4_defun([_LT_HEADER_DLFCN], +[AC_CHECK_HEADERS([dlfcn.h], [], [], [AC_INCLUDES_DEFAULT])dnl +])# _LT_HEADER_DLFCN + + +# _LT_TRY_DLOPEN_SELF (ACTION-IF-TRUE, ACTION-IF-TRUE-W-USCORE, +# ACTION-IF-FALSE, ACTION-IF-CROSS-COMPILING) +# ---------------------------------------------------------------- +m4_defun([_LT_TRY_DLOPEN_SELF], +[m4_require([_LT_HEADER_DLFCN])dnl +if test "$cross_compiling" = yes; then : + [$4] +else + lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 + lt_status=$lt_dlunknown + cat > conftest.$ac_ext <<_LT_EOF +[#line $LINENO "configure" +#include "confdefs.h" + +#if HAVE_DLFCN_H +#include +#endif + +#include + +#ifdef RTLD_GLOBAL +# define LT_DLGLOBAL RTLD_GLOBAL +#else +# ifdef DL_GLOBAL +# define LT_DLGLOBAL DL_GLOBAL +# else +# define LT_DLGLOBAL 0 +# endif +#endif + +/* We may have to define LT_DLLAZY_OR_NOW in the command line if we + find out it does not work in some platform. */ +#ifndef LT_DLLAZY_OR_NOW +# ifdef RTLD_LAZY +# define LT_DLLAZY_OR_NOW RTLD_LAZY +# else +# ifdef DL_LAZY +# define LT_DLLAZY_OR_NOW DL_LAZY +# else +# ifdef RTLD_NOW +# define LT_DLLAZY_OR_NOW RTLD_NOW +# else +# ifdef DL_NOW +# define LT_DLLAZY_OR_NOW DL_NOW +# else +# define LT_DLLAZY_OR_NOW 0 +# endif +# endif +# endif +# endif +#endif + +/* When -fvisbility=hidden is used, assume the code has been annotated + correspondingly for the symbols needed. 
*/ +#if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) +int fnord () __attribute__((visibility("default"))); +#endif + +int fnord () { return 42; } +int main () +{ + void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); + int status = $lt_dlunknown; + + if (self) + { + if (dlsym (self,"fnord")) status = $lt_dlno_uscore; + else + { + if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; + else puts (dlerror ()); + } + /* dlclose (self); */ + } + else + puts (dlerror ()); + + return status; +}] +_LT_EOF + if AC_TRY_EVAL(ac_link) && test -s conftest${ac_exeext} 2>/dev/null; then + (./conftest; exit; ) >&AS_MESSAGE_LOG_FD 2>/dev/null + lt_status=$? + case x$lt_status in + x$lt_dlno_uscore) $1 ;; + x$lt_dlneed_uscore) $2 ;; + x$lt_dlunknown|x*) $3 ;; + esac + else : + # compilation failed + $3 + fi +fi +rm -fr conftest* +])# _LT_TRY_DLOPEN_SELF + + +# LT_SYS_DLOPEN_SELF +# ------------------ +AC_DEFUN([LT_SYS_DLOPEN_SELF], +[m4_require([_LT_HEADER_DLFCN])dnl +if test "x$enable_dlopen" != xyes; then + enable_dlopen=unknown + enable_dlopen_self=unknown + enable_dlopen_self_static=unknown +else + lt_cv_dlopen=no + lt_cv_dlopen_libs= + + case $host_os in + beos*) + lt_cv_dlopen="load_add_on" + lt_cv_dlopen_libs= + lt_cv_dlopen_self=yes + ;; + + mingw* | pw32* | cegcc*) + lt_cv_dlopen="LoadLibrary" + lt_cv_dlopen_libs= + ;; + + cygwin*) + lt_cv_dlopen="dlopen" + lt_cv_dlopen_libs= + ;; + + darwin*) + # if libdl is installed we need to link against it + AC_CHECK_LIB([dl], [dlopen], + [lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl"],[ + lt_cv_dlopen="dyld" + lt_cv_dlopen_libs= + lt_cv_dlopen_self=yes + ]) + ;; + + *) + AC_CHECK_FUNC([shl_load], + [lt_cv_dlopen="shl_load"], + [AC_CHECK_LIB([dld], [shl_load], + [lt_cv_dlopen="shl_load" lt_cv_dlopen_libs="-ldld"], + [AC_CHECK_FUNC([dlopen], + [lt_cv_dlopen="dlopen"], + [AC_CHECK_LIB([dl], [dlopen], + [lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl"], + [AC_CHECK_LIB([svld], [dlopen], + 
[lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-lsvld"], + [AC_CHECK_LIB([dld], [dld_link], + [lt_cv_dlopen="dld_link" lt_cv_dlopen_libs="-ldld"]) + ]) + ]) + ]) + ]) + ]) + ;; + esac + + if test "x$lt_cv_dlopen" != xno; then + enable_dlopen=yes + else + enable_dlopen=no + fi + + case $lt_cv_dlopen in + dlopen) + save_CPPFLAGS="$CPPFLAGS" + test "x$ac_cv_header_dlfcn_h" = xyes && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H" + + save_LDFLAGS="$LDFLAGS" + wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\" + + save_LIBS="$LIBS" + LIBS="$lt_cv_dlopen_libs $LIBS" + + AC_CACHE_CHECK([whether a program can dlopen itself], + lt_cv_dlopen_self, [dnl + _LT_TRY_DLOPEN_SELF( + lt_cv_dlopen_self=yes, lt_cv_dlopen_self=yes, + lt_cv_dlopen_self=no, lt_cv_dlopen_self=cross) + ]) + + if test "x$lt_cv_dlopen_self" = xyes; then + wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $lt_prog_compiler_static\" + AC_CACHE_CHECK([whether a statically linked program can dlopen itself], + lt_cv_dlopen_self_static, [dnl + _LT_TRY_DLOPEN_SELF( + lt_cv_dlopen_self_static=yes, lt_cv_dlopen_self_static=yes, + lt_cv_dlopen_self_static=no, lt_cv_dlopen_self_static=cross) + ]) + fi + + CPPFLAGS="$save_CPPFLAGS" + LDFLAGS="$save_LDFLAGS" + LIBS="$save_LIBS" + ;; + esac + + case $lt_cv_dlopen_self in + yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;; + *) enable_dlopen_self=unknown ;; + esac + + case $lt_cv_dlopen_self_static in + yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;; + *) enable_dlopen_self_static=unknown ;; + esac +fi +_LT_DECL([dlopen_support], [enable_dlopen], [0], + [Whether dlopen is supported]) +_LT_DECL([dlopen_self], [enable_dlopen_self], [0], + [Whether dlopen of programs is supported]) +_LT_DECL([dlopen_self_static], [enable_dlopen_self_static], [0], + [Whether dlopen of statically linked programs is supported]) +])# LT_SYS_DLOPEN_SELF + +# Old name: +AU_ALIAS([AC_LIBTOOL_DLOPEN_SELF], [LT_SYS_DLOPEN_SELF]) +dnl aclocal-1.4 backwards compatibility: +dnl 
AC_DEFUN([AC_LIBTOOL_DLOPEN_SELF], []) + + +# _LT_COMPILER_C_O([TAGNAME]) +# --------------------------- +# Check to see if options -c and -o are simultaneously supported by compiler. +# This macro does not hard code the compiler like AC_PROG_CC_C_O. +m4_defun([_LT_COMPILER_C_O], +[m4_require([_LT_DECL_SED])dnl +m4_require([_LT_FILEUTILS_DEFAULTS])dnl +m4_require([_LT_TAG_COMPILER])dnl +AC_CACHE_CHECK([if $compiler supports -c -o file.$ac_objext], + [_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)], + [_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=no + $RM -r conftest 2>/dev/null + mkdir conftest + cd conftest + mkdir out + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + + lt_compiler_flag="-o out/conftest2.$ac_objext" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [[^ ]]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&AS_MESSAGE_LOG_FD) + (eval "$lt_compile" 2>out/conftest.err) + ac_status=$? + cat out/conftest.err >&AS_MESSAGE_LOG_FD + echo "$as_me:$LINENO: \$? = $ac_status" >&AS_MESSAGE_LOG_FD + if (exit $ac_status) && test -s out/conftest2.$ac_objext + then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings + $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp + $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 + if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then + _LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=yes + fi + fi + chmod u+w . 
2>&AS_MESSAGE_LOG_FD + $RM conftest* + # SGI C++ compiler will create directory out/ii_files/ for + # template instantiation + test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files + $RM out/* && rmdir out + cd .. + $RM -r conftest + $RM conftest* +]) +_LT_TAGDECL([compiler_c_o], [lt_cv_prog_compiler_c_o], [1], + [Does compiler simultaneously support -c and -o options?]) +])# _LT_COMPILER_C_O + + +# _LT_COMPILER_FILE_LOCKS([TAGNAME]) +# ---------------------------------- +# Check to see if we can do hard links to lock some files if needed +m4_defun([_LT_COMPILER_FILE_LOCKS], +[m4_require([_LT_ENABLE_LOCK])dnl +m4_require([_LT_FILEUTILS_DEFAULTS])dnl +_LT_COMPILER_C_O([$1]) + +hard_links="nottested" +if test "$_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)" = no && test "$need_locks" != no; then + # do not overwrite the value of need_locks provided by the user + AC_MSG_CHECKING([if we can lock with hard links]) + hard_links=yes + $RM conftest* + ln conftest.a conftest.b 2>/dev/null && hard_links=no + touch conftest.a + ln conftest.a conftest.b 2>&5 || hard_links=no + ln conftest.a conftest.b 2>/dev/null && hard_links=no + AC_MSG_RESULT([$hard_links]) + if test "$hard_links" = no; then + AC_MSG_WARN([`$CC' does not support `-c -o', so `make -j' may be unsafe]) + need_locks=warn + fi +else + need_locks=no +fi +_LT_DECL([], [need_locks], [1], [Must we lock files when doing compilation?]) +])# _LT_COMPILER_FILE_LOCKS + + +# _LT_CHECK_OBJDIR +# ---------------- +m4_defun([_LT_CHECK_OBJDIR], +[AC_CACHE_CHECK([for objdir], [lt_cv_objdir], +[rm -f .libs 2>/dev/null +mkdir .libs 2>/dev/null +if test -d .libs; then + lt_cv_objdir=.libs +else + # MS-DOS does not allow filenames that begin with a dot. 
+ lt_cv_objdir=_libs +fi +rmdir .libs 2>/dev/null]) +objdir=$lt_cv_objdir +_LT_DECL([], [objdir], [0], + [The name of the directory that contains temporary libtool files])dnl +m4_pattern_allow([LT_OBJDIR])dnl +AC_DEFINE_UNQUOTED(LT_OBJDIR, "$lt_cv_objdir/", + [Define to the sub-directory in which libtool stores uninstalled libraries.]) +])# _LT_CHECK_OBJDIR + + +# _LT_LINKER_HARDCODE_LIBPATH([TAGNAME]) +# -------------------------------------- +# Check hardcoding attributes. +m4_defun([_LT_LINKER_HARDCODE_LIBPATH], +[AC_MSG_CHECKING([how to hardcode library paths into programs]) +_LT_TAGVAR(hardcode_action, $1)= +if test -n "$_LT_TAGVAR(hardcode_libdir_flag_spec, $1)" || + test -n "$_LT_TAGVAR(runpath_var, $1)" || + test "X$_LT_TAGVAR(hardcode_automatic, $1)" = "Xyes" ; then + + # We can hardcode non-existent directories. + if test "$_LT_TAGVAR(hardcode_direct, $1)" != no && + # If the only mechanism to avoid hardcoding is shlibpath_var, we + # have to relink, otherwise we might link with an installed library + # when we should be linking with a yet-to-be-installed one + ## test "$_LT_TAGVAR(hardcode_shlibpath_var, $1)" != no && + test "$_LT_TAGVAR(hardcode_minus_L, $1)" != no; then + # Linking always hardcodes the temporary library directory. + _LT_TAGVAR(hardcode_action, $1)=relink + else + # We can link without hardcoding, and we can hardcode nonexisting dirs. + _LT_TAGVAR(hardcode_action, $1)=immediate + fi +else + # We cannot hardcode anything, or else we can only hardcode existing + # directories. 
+ _LT_TAGVAR(hardcode_action, $1)=unsupported +fi +AC_MSG_RESULT([$_LT_TAGVAR(hardcode_action, $1)]) + +if test "$_LT_TAGVAR(hardcode_action, $1)" = relink || + test "$_LT_TAGVAR(inherit_rpath, $1)" = yes; then + # Fast installation is not supported + enable_fast_install=no +elif test "$shlibpath_overrides_runpath" = yes || + test "$enable_shared" = no; then + # Fast installation is not necessary + enable_fast_install=needless +fi +_LT_TAGDECL([], [hardcode_action], [0], + [How to hardcode a shared library path into an executable]) +])# _LT_LINKER_HARDCODE_LIBPATH + + +# _LT_CMD_STRIPLIB +# ---------------- +m4_defun([_LT_CMD_STRIPLIB], +[m4_require([_LT_DECL_EGREP]) +striplib= +old_striplib= +AC_MSG_CHECKING([whether stripping libraries is possible]) +if test -n "$STRIP" && $STRIP -V 2>&1 | $GREP "GNU strip" >/dev/null; then + test -z "$old_striplib" && old_striplib="$STRIP --strip-debug" + test -z "$striplib" && striplib="$STRIP --strip-unneeded" + AC_MSG_RESULT([yes]) +else +# FIXME - insert some real tests, host_os isn't really good enough + case $host_os in + darwin*) + if test -n "$STRIP" ; then + striplib="$STRIP -x" + old_striplib="$STRIP -S" + AC_MSG_RESULT([yes]) + else + AC_MSG_RESULT([no]) + fi + ;; + *) + AC_MSG_RESULT([no]) + ;; + esac +fi +_LT_DECL([], [old_striplib], [1], [Commands to strip libraries]) +_LT_DECL([], [striplib], [1]) +])# _LT_CMD_STRIPLIB + + +# _LT_SYS_DYNAMIC_LINKER([TAG]) +# ----------------------------- +# PORTME Fill in your ld.so characteristics +m4_defun([_LT_SYS_DYNAMIC_LINKER], +[AC_REQUIRE([AC_CANONICAL_HOST])dnl +m4_require([_LT_DECL_EGREP])dnl +m4_require([_LT_FILEUTILS_DEFAULTS])dnl +m4_require([_LT_DECL_OBJDUMP])dnl +m4_require([_LT_DECL_SED])dnl +m4_require([_LT_CHECK_SHELL_FEATURES])dnl +AC_MSG_CHECKING([dynamic linker characteristics]) +m4_if([$1], + [], [ +if test "$GCC" = yes; then + case $host_os in + darwin*) lt_awk_arg="/^libraries:/,/LR/" ;; + *) lt_awk_arg="/^libraries:/" ;; + esac + case $host_os in + mingw* 
| cegcc*) lt_sed_strip_eq="s,=\([[A-Za-z]]:\),\1,g" ;; + *) lt_sed_strip_eq="s,=/,/,g" ;; + esac + lt_search_path_spec=`$CC -print-search-dirs | awk $lt_awk_arg | $SED -e "s/^libraries://" -e $lt_sed_strip_eq` + case $lt_search_path_spec in + *\;*) + # if the path contains ";" then we assume it to be the separator + # otherwise default to the standard path separator (i.e. ":") - it is + # assumed that no part of a normal pathname contains ";" but that should + # okay in the real world where ";" in dirpaths is itself problematic. + lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED 's/;/ /g'` + ;; + *) + lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED "s/$PATH_SEPARATOR/ /g"` + ;; + esac + # Ok, now we have the path, separated by spaces, we can step through it + # and add multilib dir if necessary. + lt_tmp_lt_search_path_spec= + lt_multi_os_dir=`$CC $CPPFLAGS $CFLAGS $LDFLAGS -print-multi-os-directory 2>/dev/null` + for lt_sys_path in $lt_search_path_spec; do + if test -d "$lt_sys_path/$lt_multi_os_dir"; then + lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path/$lt_multi_os_dir" + else + test -d "$lt_sys_path" && \ + lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path" + fi + done + lt_search_path_spec=`$ECHO "$lt_tmp_lt_search_path_spec" | awk ' +BEGIN {RS=" "; FS="/|\n";} { + lt_foo=""; + lt_count=0; + for (lt_i = NF; lt_i > 0; lt_i--) { + if ($lt_i != "" && $lt_i != ".") { + if ($lt_i == "..") { + lt_count++; + } else { + if (lt_count == 0) { + lt_foo="/" $lt_i lt_foo; + } else { + lt_count--; + } + } + } + } + if (lt_foo != "") { lt_freq[[lt_foo]]++; } + if (lt_freq[[lt_foo]] == 1) { print lt_foo; } +}'` + # AWK program above erroneously prepends '/' to C:/dos/paths + # for these hosts. 
+ case $host_os in + mingw* | cegcc*) lt_search_path_spec=`$ECHO "$lt_search_path_spec" |\ + $SED 's,/\([[A-Za-z]]:\),\1,g'` ;; + esac + sys_lib_search_path_spec=`$ECHO "$lt_search_path_spec" | $lt_NL2SP` +else + sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" +fi]) +library_names_spec= +libname_spec='lib$name' +soname_spec= +shrext_cmds=".so" +postinstall_cmds= +postuninstall_cmds= +finish_cmds= +finish_eval= +shlibpath_var= +shlibpath_overrides_runpath=unknown +version_type=none +dynamic_linker="$host_os ld.so" +sys_lib_dlsearch_path_spec="/lib /usr/lib" +need_lib_prefix=unknown +hardcode_into_libs=no + +# when you set need_version to no, make sure it does not cause -set_version +# flags to be left without arguments +need_version=unknown + +case $host_os in +aix3*) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='${libname}${release}${shared_ext}$versuffix $libname.a' + shlibpath_var=LIBPATH + + # AIX 3 has no versioning support, so we append a major version to the name. + soname_spec='${libname}${release}${shared_ext}$major' + ;; + +aix[[4-9]]*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + hardcode_into_libs=yes + if test "$host_cpu" = ia64; then + # AIX 5 supports IA64 + library_names_spec='${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext}$versuffix $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + else + # With GCC up to 2.95.x, collect2 would create an import file + # for dependence libraries. The import file would start with + # the line `#! .'. This would cause the generated library to + # depend on `.', always an invalid library. This was fixed in + # development snapshots of GCC prior to 3.0. 
+ case $host_os in + aix4 | aix4.[[01]] | aix4.[[01]].*) + if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' + echo ' yes ' + echo '#endif'; } | ${CC} -E - | $GREP yes > /dev/null; then + : + else + can_build_shared=no + fi + ;; + esac + # AIX (on Power*) has no versioning support, so currently we can not hardcode correct + # soname into executable. Probably we can add versioning support to + # collect2, so additional links can be useful in future. + if test "$aix_use_runtimelinking" = yes; then + # If using run time linking (on AIX 4.2 or later) use lib.so + # instead of lib.a to let people know that these are not + # typical AIX shared libraries. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + else + # We preserve .a as extension for shared libraries through AIX4.2 + # and later when we are not doing run time linking. + library_names_spec='${libname}${release}.a $libname.a' + soname_spec='${libname}${release}${shared_ext}$major' + fi + shlibpath_var=LIBPATH + fi + ;; + +amigaos*) + case $host_cpu in + powerpc) + # Since July 2007 AmigaOS4 officially supports .so libraries. + # When compiling the executable, add -use-dynld -Lsobjs: to the compileline. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + ;; + m68k) + library_names_spec='$libname.ixlibrary $libname.a' + # Create ${libname}_ixlibrary.a entries in /sys/libs. 
+ finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([[^/]]*\)\.ixlibrary$%\1%'\''`; test $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' + ;; + esac + ;; + +beos*) + library_names_spec='${libname}${shared_ext}' + dynamic_linker="$host_os ld.so" + shlibpath_var=LIBRARY_PATH + ;; + +bsdi[[45]]*) + version_type=linux # correct to gnu/linux during the next big refactor + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" + sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" + # the default ld.so.conf also contains /usr/contrib/lib and + # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow + # libtool to hard-code these into programs + ;; + +cygwin* | mingw* | pw32* | cegcc*) + version_type=windows + shrext_cmds=".dll" + need_version=no + need_lib_prefix=no + + case $GCC,$cc_basename in + yes,*) + # gcc + library_names_spec='$libname.dll.a' + # DLL is installed to $(libdir)/../bin by postinstall_cmds + postinstall_cmds='base_file=`basename \${file}`~ + dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname~ + chmod a+x \$dldir/$dlname~ + if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then + eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; + fi' + postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. 
$file; echo \$dlname'\''`~ + dlpath=$dir/\$dldll~ + $RM \$dlpath' + shlibpath_overrides_runpath=yes + + case $host_os in + cygwin*) + # Cygwin DLLs use 'cyg' prefix rather than 'lib' + soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}' +m4_if([$1], [],[ + sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/lib/w32api"]) + ;; + mingw* | cegcc*) + # MinGW DLLs use traditional 'lib' prefix + soname_spec='${libname}`echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}' + ;; + pw32*) + # pw32 DLLs use 'pw' prefix rather than 'lib' + library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}' + ;; + esac + dynamic_linker='Win32 ld.exe' + ;; + + *,cl*) + # Native MSVC + libname_spec='$name' + soname_spec='${libname}`echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}' + library_names_spec='${libname}.dll.lib' + + case $build_os in + mingw*) + sys_lib_search_path_spec= + lt_save_ifs=$IFS + IFS=';' + for lt_path in $LIB + do + IFS=$lt_save_ifs + # Let DOS variable expansion print the short 8.3 style file name. + lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"` + sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path" + done + IFS=$lt_save_ifs + # Convert to MSYS style. + sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([[a-zA-Z]]\\):| /\\1|g' -e 's|^ ||'` + ;; + cygwin*) + # Convert to unix form, then to dos form, then back to unix form + # but this time dos style (no spaces!) so that the unix form looks + # like /cygdrive/c/PROGRA~1:/cygdr... 
+ sys_lib_search_path_spec=`cygpath --path --unix "$LIB"` + sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null` + sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + ;; + *) + sys_lib_search_path_spec="$LIB" + if $ECHO "$sys_lib_search_path_spec" | [$GREP ';[c-zC-Z]:/' >/dev/null]; then + # It is most probably a Windows format PATH. + sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` + else + sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + fi + # FIXME: find the short name or the path components, as spaces are + # common. (e.g. "Program Files" -> "PROGRA~1") + ;; + esac + + # DLL is installed to $(libdir)/../bin by postinstall_cmds + postinstall_cmds='base_file=`basename \${file}`~ + dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname' + postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ + dlpath=$dir/\$dldll~ + $RM \$dlpath' + shlibpath_overrides_runpath=yes + dynamic_linker='Win32 link.exe' + ;; + + *) + # Assume MSVC wrapper + library_names_spec='${libname}`echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext} $libname.lib' + dynamic_linker='Win32 ld.exe' + ;; + esac + # FIXME: first we should search . 
and the directory the executable is in + shlibpath_var=PATH + ;; + +darwin* | rhapsody*) + dynamic_linker="$host_os dyld" + version_type=darwin + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${major}$shared_ext ${libname}$shared_ext' + soname_spec='${libname}${release}${major}$shared_ext' + shlibpath_overrides_runpath=yes + shlibpath_var=DYLD_LIBRARY_PATH + shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' +m4_if([$1], [],[ + sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/local/lib"]) + sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' + ;; + +dgux*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname$shared_ext' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +freebsd* | dragonfly*) + # DragonFly does not have aout. When/if they implement a new + # versioning mechanism, adjust this. 
+ if test -x /usr/bin/objformat; then + objformat=`/usr/bin/objformat` + else + case $host_os in + freebsd[[23]].*) objformat=aout ;; + *) objformat=elf ;; + esac + fi + version_type=freebsd-$objformat + case $version_type in + freebsd-elf*) + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' + need_version=no + need_lib_prefix=no + ;; + freebsd-*) + library_names_spec='${libname}${release}${shared_ext}$versuffix $libname${shared_ext}$versuffix' + need_version=yes + ;; + esac + shlibpath_var=LD_LIBRARY_PATH + case $host_os in + freebsd2.*) + shlibpath_overrides_runpath=yes + ;; + freebsd3.[[01]]* | freebsdelf3.[[01]]*) + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + freebsd3.[[2-9]]* | freebsdelf3.[[2-9]]* | \ + freebsd4.[[0-5]] | freebsdelf4.[[0-5]] | freebsd4.1.1 | freebsdelf4.1.1) + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + *) # from 4.6 on, and DragonFly + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + esac + ;; + +gnu*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + +haiku*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + dynamic_linker="$host_os runtime_loader" + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LIBRARY_PATH + shlibpath_overrides_runpath=yes + sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib' + hardcode_into_libs=yes + ;; + 
+hpux9* | hpux10* | hpux11*) + # Give a soname corresponding to the major version so that dld.sl refuses to + # link against other versions. + version_type=sunos + need_lib_prefix=no + need_version=no + case $host_cpu in + ia64*) + shrext_cmds='.so' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.so" + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + if test "X$HPUX_IA64_MODE" = X32; then + sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" + else + sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" + fi + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + hppa*64*) + shrext_cmds='.sl' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.sl" + shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + *) + shrext_cmds='.sl' + dynamic_linker="$host_os dld.sl" + shlibpath_var=SHLIB_PATH + shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + ;; + esac + # HP-UX runs *really* slowly unless shared libraries are mode 555, ... 
+ postinstall_cmds='chmod 555 $lib' + # or fails outright, so override atomically: + install_override_mode=555 + ;; + +interix[[3-9]]*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + +irix5* | irix6* | nonstopux*) + case $host_os in + nonstopux*) version_type=nonstopux ;; + *) + if test "$lt_cv_prog_gnu_ld" = yes; then + version_type=linux # correct to gnu/linux during the next big refactor + else + version_type=irix + fi ;; + esac + need_lib_prefix=no + need_version=no + soname_spec='${libname}${release}${shared_ext}$major' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} $libname${shared_ext}' + case $host_os in + irix5* | nonstopux*) + libsuff= shlibsuff= + ;; + *) + case $LD in # libtool.m4 will add one of these switches to LD + *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") + libsuff= shlibsuff= libmagic=32-bit;; + *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") + libsuff=32 shlibsuff=N32 libmagic=N32;; + *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") + libsuff=64 shlibsuff=64 libmagic=64-bit;; + *) libsuff= shlibsuff= libmagic=never-match;; + esac + ;; + esac + shlibpath_var=LD_LIBRARY${shlibsuff}_PATH + shlibpath_overrides_runpath=no + sys_lib_search_path_spec="/usr/lib${libsuff} /lib${libsuff} /usr/local/lib${libsuff}" + sys_lib_dlsearch_path_spec="/usr/lib${libsuff} /lib${libsuff}" + hardcode_into_libs=yes + ;; + +# No shared lib support for Linux oldld, aout, or coff. +linux*oldld* | linux*aout* | linux*coff*) + dynamic_linker=no + ;; + +# This must be glibc/ELF. 
+linux* | k*bsd*-gnu | kopensolaris*-gnu) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + + # Some binutils ld are patched to set DT_RUNPATH + AC_CACHE_VAL([lt_cv_shlibpath_overrides_runpath], + [lt_cv_shlibpath_overrides_runpath=no + save_LDFLAGS=$LDFLAGS + save_libdir=$libdir + eval "libdir=/foo; wl=\"$_LT_TAGVAR(lt_prog_compiler_wl, $1)\"; \ + LDFLAGS=\"\$LDFLAGS $_LT_TAGVAR(hardcode_libdir_flag_spec, $1)\"" + AC_LINK_IFELSE([AC_LANG_PROGRAM([],[])], + [AS_IF([ ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null], + [lt_cv_shlibpath_overrides_runpath=yes])]) + LDFLAGS=$save_LDFLAGS + libdir=$save_libdir + ]) + shlibpath_overrides_runpath=$lt_cv_shlibpath_overrides_runpath + + # This implies no fast_install, which is unacceptable. + # Some rework will be needed to allow for fast_install + # before this can be enabled. + hardcode_into_libs=yes + + # Add ABI-specific directories to the system library path. + sys_lib_dlsearch_path_spec="/lib64 /usr/lib64 /lib /usr/lib" + + # Append ld.so.conf contents to the search path + if test -f /etc/ld.so.conf; then + lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \[$]2)); skip = 1; } { if (!skip) print \[$]0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;s/"//g;/^$/d' | tr '\n' ' '` + sys_lib_dlsearch_path_spec="$sys_lib_dlsearch_path_spec $lt_ld_extra" + + fi + + # We used to test for /lib/ld.so.1 and disable shared libraries on + # powerpc, because MkLinux only supported shared libraries with the + # GNU dynamic linker. 
Since this was broken with cross compilers, + # most powerpc-linux boxes support dynamic linking these days and + # people can always --disable-shared, the test was removed, and we + # assume the GNU/Linux dynamic linker is in use. + dynamic_linker='GNU/Linux ld.so' + ;; + +netbsd*) + version_type=sunos + need_lib_prefix=no + need_version=no + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + dynamic_linker='NetBSD (a.out) ld.so' + else + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + dynamic_linker='NetBSD ld.elf_so' + fi + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + +newsos6) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + ;; + +*nto* | *qnx*) + version_type=qnx + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='ldqnx.so' + ;; + +openbsd*) + version_type=sunos + sys_lib_dlsearch_path_spec="/usr/lib" + need_lib_prefix=no + # Some older versions of OpenBSD (3.3 at least) *do* need versioned libs. 
+ case $host_os in + openbsd3.3 | openbsd3.3.*) need_version=yes ;; + *) need_version=no ;; + esac + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + shlibpath_var=LD_LIBRARY_PATH + if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + case $host_os in + openbsd2.[[89]] | openbsd2.[[89]].*) + shlibpath_overrides_runpath=no + ;; + *) + shlibpath_overrides_runpath=yes + ;; + esac + else + shlibpath_overrides_runpath=yes + fi + ;; + +os2*) + libname_spec='$name' + shrext_cmds=".dll" + need_lib_prefix=no + library_names_spec='$libname${shared_ext} $libname.a' + dynamic_linker='OS/2 ld.exe' + shlibpath_var=LIBPATH + ;; + +osf3* | osf4* | osf5*) + version_type=osf + need_lib_prefix=no + need_version=no + soname_spec='${libname}${release}${shared_ext}$major' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" + sys_lib_dlsearch_path_spec="$sys_lib_search_path_spec" + ;; + +rdos*) + dynamic_linker=no + ;; + +solaris*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + # ldd complains unless libraries are executable + postinstall_cmds='chmod +x $lib' + ;; + +sunos4*) + version_type=sunos + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' + 
shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + if test "$with_gnu_ld" = yes; then + need_lib_prefix=no + fi + need_version=yes + ;; + +sysv4 | sysv4.3*) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + case $host_vendor in + sni) + shlibpath_overrides_runpath=no + need_lib_prefix=no + runpath_var=LD_RUN_PATH + ;; + siemens) + need_lib_prefix=no + ;; + motorola) + need_lib_prefix=no + need_version=no + shlibpath_overrides_runpath=no + sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' + ;; + esac + ;; + +sysv4*MP*) + if test -d /usr/nec ;then + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='$libname${shared_ext}.$versuffix $libname${shared_ext}.$major $libname${shared_ext}' + soname_spec='$libname${shared_ext}.$major' + shlibpath_var=LD_LIBRARY_PATH + fi + ;; + +sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) + version_type=freebsd-elf + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + if test "$with_gnu_ld" = yes; then + sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib' + else + sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' + case $host_os in + sco3.2v5*) + sys_lib_search_path_spec="$sys_lib_search_path_spec /lib" + ;; + esac + fi + sys_lib_dlsearch_path_spec='/usr/lib' + ;; + +tpf*) + # TPF is a cross-target only. Preferred cross-host = GNU/Linux. 
+ version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + +uts4*) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +*) + dynamic_linker=no + ;; +esac +AC_MSG_RESULT([$dynamic_linker]) +test "$dynamic_linker" = no && can_build_shared=no + +variables_saved_for_relink="PATH $shlibpath_var $runpath_var" +if test "$GCC" = yes; then + variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" +fi + +if test "${lt_cv_sys_lib_search_path_spec+set}" = set; then + sys_lib_search_path_spec="$lt_cv_sys_lib_search_path_spec" +fi +if test "${lt_cv_sys_lib_dlsearch_path_spec+set}" = set; then + sys_lib_dlsearch_path_spec="$lt_cv_sys_lib_dlsearch_path_spec" +fi + +_LT_DECL([], [variables_saved_for_relink], [1], + [Variables whose values should be saved in libtool wrapper scripts and + restored at link time]) +_LT_DECL([], [need_lib_prefix], [0], + [Do we need the "lib" prefix for modules?]) +_LT_DECL([], [need_version], [0], [Do we need a version for libraries?]) +_LT_DECL([], [version_type], [0], [Library versioning type]) +_LT_DECL([], [runpath_var], [0], [Shared library runtime path variable]) +_LT_DECL([], [shlibpath_var], [0],[Shared library path variable]) +_LT_DECL([], [shlibpath_overrides_runpath], [0], + [Is shlibpath searched before the hard-coded library search path?]) +_LT_DECL([], [libname_spec], [1], [Format of library name prefix]) +_LT_DECL([], [library_names_spec], [1], + [[List of archive names. 
First name is the real one, the rest are links. + The last name is the one that the linker finds with -lNAME]]) +_LT_DECL([], [soname_spec], [1], + [[The coded name of the library, if different from the real name]]) +_LT_DECL([], [install_override_mode], [1], + [Permission mode override for installation of shared libraries]) +_LT_DECL([], [postinstall_cmds], [2], + [Command to use after installation of a shared archive]) +_LT_DECL([], [postuninstall_cmds], [2], + [Command to use after uninstallation of a shared archive]) +_LT_DECL([], [finish_cmds], [2], + [Commands used to finish a libtool library installation in a directory]) +_LT_DECL([], [finish_eval], [1], + [[As "finish_cmds", except a single script fragment to be evaled but + not shown]]) +_LT_DECL([], [hardcode_into_libs], [0], + [Whether we should hardcode library paths into libraries]) +_LT_DECL([], [sys_lib_search_path_spec], [2], + [Compile-time system search path for libraries]) +_LT_DECL([], [sys_lib_dlsearch_path_spec], [2], + [Run-time system search path for libraries]) +])# _LT_SYS_DYNAMIC_LINKER + + +# _LT_PATH_TOOL_PREFIX(TOOL) +# -------------------------- +# find a file program which can recognize shared library +AC_DEFUN([_LT_PATH_TOOL_PREFIX], +[m4_require([_LT_DECL_EGREP])dnl +AC_MSG_CHECKING([for $1]) +AC_CACHE_VAL(lt_cv_path_MAGIC_CMD, +[case $MAGIC_CMD in +[[\\/*] | ?:[\\/]*]) + lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a path. + ;; +*) + lt_save_MAGIC_CMD="$MAGIC_CMD" + lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR +dnl $ac_dummy forces splitting on constant user-supplied paths. +dnl POSIX.2 word splitting is done only on the output of word expansions, +dnl not every word. This closes a longstanding sh security hole. + ac_dummy="m4_if([$2], , $PATH, [$2])" + for ac_dir in $ac_dummy; do + IFS="$lt_save_ifs" + test -z "$ac_dir" && ac_dir=. 
+ if test -f $ac_dir/$1; then + lt_cv_path_MAGIC_CMD="$ac_dir/$1" + if test -n "$file_magic_test_file"; then + case $deplibs_check_method in + "file_magic "*) + file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` + MAGIC_CMD="$lt_cv_path_MAGIC_CMD" + if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | + $EGREP "$file_magic_regex" > /dev/null; then + : + else + cat <<_LT_EOF 1>&2 + +*** Warning: the command libtool uses to detect shared libraries, +*** $file_magic_cmd, produces output that libtool cannot recognize. +*** The result is that libtool may fail to recognize shared libraries +*** as such. This will affect the creation of libtool libraries that +*** depend on shared libraries, but programs linked with such libtool +*** libraries will work regardless of this problem. Nevertheless, you +*** may want to report the problem to your system manager and/or to +*** bug-libtool@gnu.org + +_LT_EOF + fi ;; + esac + fi + break + fi + done + IFS="$lt_save_ifs" + MAGIC_CMD="$lt_save_MAGIC_CMD" + ;; +esac]) +MAGIC_CMD="$lt_cv_path_MAGIC_CMD" +if test -n "$MAGIC_CMD"; then + AC_MSG_RESULT($MAGIC_CMD) +else + AC_MSG_RESULT(no) +fi +_LT_DECL([], [MAGIC_CMD], [0], + [Used to examine libraries when file_magic_cmd begins with "file"])dnl +])# _LT_PATH_TOOL_PREFIX + +# Old name: +AU_ALIAS([AC_PATH_TOOL_PREFIX], [_LT_PATH_TOOL_PREFIX]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_PATH_TOOL_PREFIX], []) + + +# _LT_PATH_MAGIC +# -------------- +# find a file program which can recognize a shared library +m4_defun([_LT_PATH_MAGIC], +[_LT_PATH_TOOL_PREFIX(${ac_tool_prefix}file, /usr/bin$PATH_SEPARATOR$PATH) +if test -z "$lt_cv_path_MAGIC_CMD"; then + if test -n "$ac_tool_prefix"; then + _LT_PATH_TOOL_PREFIX(file, /usr/bin$PATH_SEPARATOR$PATH) + else + MAGIC_CMD=: + fi +fi +])# _LT_PATH_MAGIC + + +# LT_PATH_LD +# ---------- +# find the pathname to the GNU or non-GNU linker +AC_DEFUN([LT_PATH_LD], +[AC_REQUIRE([AC_PROG_CC])dnl 
+AC_REQUIRE([AC_CANONICAL_HOST])dnl +AC_REQUIRE([AC_CANONICAL_BUILD])dnl +m4_require([_LT_DECL_SED])dnl +m4_require([_LT_DECL_EGREP])dnl +m4_require([_LT_PROG_ECHO_BACKSLASH])dnl + +AC_ARG_WITH([gnu-ld], + [AS_HELP_STRING([--with-gnu-ld], + [assume the C compiler uses GNU ld @<:@default=no@:>@])], + [test "$withval" = no || with_gnu_ld=yes], + [with_gnu_ld=no])dnl + +ac_prog=ld +if test "$GCC" = yes; then + # Check if gcc -print-prog-name=ld gives a path. + AC_MSG_CHECKING([for ld used by $CC]) + case $host in + *-*-mingw*) + # gcc leaves a trailing carriage return which upsets mingw + ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; + *) + ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; + esac + case $ac_prog in + # Accept absolute paths. + [[\\/]]* | ?:[[\\/]]*) + re_direlt='/[[^/]][[^/]]*/\.\./' + # Canonicalize the pathname of ld + ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'` + while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do + ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"` + done + test -z "$LD" && LD="$ac_prog" + ;; + "") + # If it fails, then pretend we aren't using GCC. + ac_prog=ld + ;; + *) + # If it is relative, then search for the first ld in PATH. + with_gnu_ld=unknown + ;; + esac +elif test "$with_gnu_ld" = yes; then + AC_MSG_CHECKING([for GNU ld]) +else + AC_MSG_CHECKING([for non-GNU ld]) +fi +AC_CACHE_VAL(lt_cv_path_LD, +[if test -z "$LD"; then + lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR + for ac_dir in $PATH; do + IFS="$lt_save_ifs" + test -z "$ac_dir" && ac_dir=. + if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then + lt_cv_path_LD="$ac_dir/$ac_prog" + # Check to see if the program is GNU ld. I'd rather use --version, + # but apparently some variants of GNU ld only accept -v. + # Break only if it was the GNU/non-GNU ld that we prefer. 
+ case `"$lt_cv_path_LD" -v 2>&1 &1 /dev/null 2>&1; then + lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' + lt_cv_file_magic_cmd='func_win32_libid' + else + # Keep this pattern in sync with the one in func_win32_libid. + lt_cv_deplibs_check_method='file_magic file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' + lt_cv_file_magic_cmd='$OBJDUMP -f' + fi + ;; + +cegcc*) + # use the weaker test based on 'objdump'. See mingw*. + lt_cv_deplibs_check_method='file_magic file format pe-arm-.*little(.*architecture: arm)?' + lt_cv_file_magic_cmd='$OBJDUMP -f' + ;; + +darwin* | rhapsody*) + lt_cv_deplibs_check_method=pass_all + ;; + +freebsd* | dragonfly*) + if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then + case $host_cpu in + i*86 ) + # Not sure whether the presence of OpenBSD here was a mistake. + # Let's accept both of them until this is cleared up. + lt_cv_deplibs_check_method='file_magic (FreeBSD|OpenBSD|DragonFly)/i[[3-9]]86 (compact )?demand paged shared library' + lt_cv_file_magic_cmd=/usr/bin/file + lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*` + ;; + esac + else + lt_cv_deplibs_check_method=pass_all + fi + ;; + +gnu*) + lt_cv_deplibs_check_method=pass_all + ;; + +haiku*) + lt_cv_deplibs_check_method=pass_all + ;; + +hpux10.20* | hpux11*) + lt_cv_file_magic_cmd=/usr/bin/file + case $host_cpu in + ia64*) + lt_cv_deplibs_check_method='file_magic (s[[0-9]][[0-9]][[0-9]]|ELF-[[0-9]][[0-9]]) shared object file - IA64' + lt_cv_file_magic_test_file=/usr/lib/hpux32/libc.so + ;; + hppa*64*) + [lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF[ -][0-9][0-9])(-bit)?( [LM]SB)? 
shared object( file)?[, -]* PA-RISC [0-9]\.[0-9]'] + lt_cv_file_magic_test_file=/usr/lib/pa20_64/libc.sl + ;; + *) + lt_cv_deplibs_check_method='file_magic (s[[0-9]][[0-9]][[0-9]]|PA-RISC[[0-9]]\.[[0-9]]) shared library' + lt_cv_file_magic_test_file=/usr/lib/libc.sl + ;; + esac + ;; + +interix[[3-9]]*) + # PIC code is broken on Interix 3.x, that's why |\.a not |_pic\.a here + lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so|\.a)$' + ;; + +irix5* | irix6* | nonstopux*) + case $LD in + *-32|*"-32 ") libmagic=32-bit;; + *-n32|*"-n32 ") libmagic=N32;; + *-64|*"-64 ") libmagic=64-bit;; + *) libmagic=never-match;; + esac + lt_cv_deplibs_check_method=pass_all + ;; + +# This must be glibc/ELF. +linux* | k*bsd*-gnu | kopensolaris*-gnu) + lt_cv_deplibs_check_method=pass_all + ;; + +netbsd*) + if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then + lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|_pic\.a)$' + else + lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so|_pic\.a)$' + fi + ;; + +newos6*) + lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (executable|dynamic lib)' + lt_cv_file_magic_cmd=/usr/bin/file + lt_cv_file_magic_test_file=/usr/lib/libnls.so + ;; + +*nto* | *qnx*) + lt_cv_deplibs_check_method=pass_all + ;; + +openbsd*) + if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|\.so|_pic\.a)$' + else + lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|_pic\.a)$' + fi + ;; + +osf3* | osf4* | osf5*) + lt_cv_deplibs_check_method=pass_all + ;; + +rdos*) + lt_cv_deplibs_check_method=pass_all + ;; + +solaris*) + lt_cv_deplibs_check_method=pass_all + ;; + +sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) + lt_cv_deplibs_check_method=pass_all + ;; + +sysv4 | sysv4.3*) + case $host_vendor in + motorola) + 
lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (shared object|dynamic lib) M[[0-9]][[0-9]]* Version [[0-9]]' + lt_cv_file_magic_test_file=`echo /usr/lib/libc.so*` + ;; + ncr) + lt_cv_deplibs_check_method=pass_all + ;; + sequent) + lt_cv_file_magic_cmd='/bin/file' + lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB (shared object|dynamic lib )' + ;; + sni) + lt_cv_file_magic_cmd='/bin/file' + lt_cv_deplibs_check_method="file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB dynamic lib" + lt_cv_file_magic_test_file=/lib/libc.so + ;; + siemens) + lt_cv_deplibs_check_method=pass_all + ;; + pc) + lt_cv_deplibs_check_method=pass_all + ;; + esac + ;; + +tpf*) + lt_cv_deplibs_check_method=pass_all + ;; +esac +]) + +file_magic_glob= +want_nocaseglob=no +if test "$build" = "$host"; then + case $host_os in + mingw* | pw32*) + if ( shopt | grep nocaseglob ) >/dev/null 2>&1; then + want_nocaseglob=yes + else + file_magic_glob=`echo aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ | $SED -e "s/\(..\)/s\/[[\1]]\/[[\1]]\/g;/g"` + fi + ;; + esac +fi + +file_magic_cmd=$lt_cv_file_magic_cmd +deplibs_check_method=$lt_cv_deplibs_check_method +test -z "$deplibs_check_method" && deplibs_check_method=unknown + +_LT_DECL([], [deplibs_check_method], [1], + [Method to check whether dependent libraries are shared objects]) +_LT_DECL([], [file_magic_cmd], [1], + [Command to use when deplibs_check_method = "file_magic"]) +_LT_DECL([], [file_magic_glob], [1], + [How to find potential files when deplibs_check_method = "file_magic"]) +_LT_DECL([], [want_nocaseglob], [1], + [Find potential files using nocaseglob when deplibs_check_method = "file_magic"]) +])# _LT_CHECK_MAGIC_METHOD + + +# LT_PATH_NM +# ---------- +# find the pathname to a BSD- or MS-compatible name lister +AC_DEFUN([LT_PATH_NM], +[AC_REQUIRE([AC_PROG_CC])dnl +AC_CACHE_CHECK([for BSD- or MS-compatible name lister (nm)], lt_cv_path_NM, +[if test -n "$NM"; then + # Let the user override the 
test. + lt_cv_path_NM="$NM" +else + lt_nm_to_check="${ac_tool_prefix}nm" + if test -n "$ac_tool_prefix" && test "$build" = "$host"; then + lt_nm_to_check="$lt_nm_to_check nm" + fi + for lt_tmp_nm in $lt_nm_to_check; do + lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR + for ac_dir in $PATH /usr/ccs/bin/elf /usr/ccs/bin /usr/ucb /bin; do + IFS="$lt_save_ifs" + test -z "$ac_dir" && ac_dir=. + tmp_nm="$ac_dir/$lt_tmp_nm" + if test -f "$tmp_nm" || test -f "$tmp_nm$ac_exeext" ; then + # Check to see if the nm accepts a BSD-compat flag. + # Adding the `sed 1q' prevents false positives on HP-UX, which says: + # nm: unknown option "B" ignored + # Tru64's nm complains that /dev/null is an invalid object file + case `"$tmp_nm" -B /dev/null 2>&1 | sed '1q'` in + */dev/null* | *'Invalid file or object type'*) + lt_cv_path_NM="$tmp_nm -B" + break + ;; + *) + case `"$tmp_nm" -p /dev/null 2>&1 | sed '1q'` in + */dev/null*) + lt_cv_path_NM="$tmp_nm -p" + break + ;; + *) + lt_cv_path_NM=${lt_cv_path_NM="$tmp_nm"} # keep the first match, but + continue # so that we can try to find one that supports BSD flags + ;; + esac + ;; + esac + fi + done + IFS="$lt_save_ifs" + done + : ${lt_cv_path_NM=no} +fi]) +if test "$lt_cv_path_NM" != "no"; then + NM="$lt_cv_path_NM" +else + # Didn't find any BSD compatible name lister, look for dumpbin. + if test -n "$DUMPBIN"; then : + # Let the user override the test. 
+ else + AC_CHECK_TOOLS(DUMPBIN, [dumpbin "link -dump"], :) + case `$DUMPBIN -symbols /dev/null 2>&1 | sed '1q'` in + *COFF*) + DUMPBIN="$DUMPBIN -symbols" + ;; + *) + DUMPBIN=: + ;; + esac + fi + AC_SUBST([DUMPBIN]) + if test "$DUMPBIN" != ":"; then + NM="$DUMPBIN" + fi +fi +test -z "$NM" && NM=nm +AC_SUBST([NM]) +_LT_DECL([], [NM], [1], [A BSD- or MS-compatible name lister])dnl + +AC_CACHE_CHECK([the name lister ($NM) interface], [lt_cv_nm_interface], + [lt_cv_nm_interface="BSD nm" + echo "int some_variable = 0;" > conftest.$ac_ext + (eval echo "\"\$as_me:$LINENO: $ac_compile\"" >&AS_MESSAGE_LOG_FD) + (eval "$ac_compile" 2>conftest.err) + cat conftest.err >&AS_MESSAGE_LOG_FD + (eval echo "\"\$as_me:$LINENO: $NM \\\"conftest.$ac_objext\\\"\"" >&AS_MESSAGE_LOG_FD) + (eval "$NM \"conftest.$ac_objext\"" 2>conftest.err > conftest.out) + cat conftest.err >&AS_MESSAGE_LOG_FD + (eval echo "\"\$as_me:$LINENO: output\"" >&AS_MESSAGE_LOG_FD) + cat conftest.out >&AS_MESSAGE_LOG_FD + if $GREP 'External.*some_variable' conftest.out > /dev/null; then + lt_cv_nm_interface="MS dumpbin" + fi + rm -f conftest*]) +])# LT_PATH_NM + +# Old names: +AU_ALIAS([AM_PROG_NM], [LT_PATH_NM]) +AU_ALIAS([AC_PROG_NM], [LT_PATH_NM]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AM_PROG_NM], []) +dnl AC_DEFUN([AC_PROG_NM], []) + +# _LT_CHECK_SHAREDLIB_FROM_LINKLIB +# -------------------------------- +# how to determine the name of the shared library +# associated with a specific link library. 
+# -- PORTME fill in with the dynamic library characteristics +m4_defun([_LT_CHECK_SHAREDLIB_FROM_LINKLIB], +[m4_require([_LT_DECL_EGREP]) +m4_require([_LT_DECL_OBJDUMP]) +m4_require([_LT_DECL_DLLTOOL]) +AC_CACHE_CHECK([how to associate runtime and link libraries], +lt_cv_sharedlib_from_linklib_cmd, +[lt_cv_sharedlib_from_linklib_cmd='unknown' + +case $host_os in +cygwin* | mingw* | pw32* | cegcc*) + # two different shell functions defined in ltmain.sh + # decide which to use based on capabilities of $DLLTOOL + case `$DLLTOOL --help 2>&1` in + *--identify-strict*) + lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib + ;; + *) + lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib_fallback + ;; + esac + ;; +*) + # fallback: assume linklib IS sharedlib + lt_cv_sharedlib_from_linklib_cmd="$ECHO" + ;; +esac +]) +sharedlib_from_linklib_cmd=$lt_cv_sharedlib_from_linklib_cmd +test -z "$sharedlib_from_linklib_cmd" && sharedlib_from_linklib_cmd=$ECHO + +_LT_DECL([], [sharedlib_from_linklib_cmd], [1], + [Command to associate shared and link libraries]) +])# _LT_CHECK_SHAREDLIB_FROM_LINKLIB + + +# _LT_PATH_MANIFEST_TOOL +# ---------------------- +# locate the manifest tool +m4_defun([_LT_PATH_MANIFEST_TOOL], +[AC_CHECK_TOOL(MANIFEST_TOOL, mt, :) +test -z "$MANIFEST_TOOL" && MANIFEST_TOOL=mt +AC_CACHE_CHECK([if $MANIFEST_TOOL is a manifest tool], [lt_cv_path_mainfest_tool], + [lt_cv_path_mainfest_tool=no + echo "$as_me:$LINENO: $MANIFEST_TOOL '-?'" >&AS_MESSAGE_LOG_FD + $MANIFEST_TOOL '-?' 
2>conftest.err > conftest.out + cat conftest.err >&AS_MESSAGE_LOG_FD + if $GREP 'Manifest Tool' conftest.out > /dev/null; then + lt_cv_path_mainfest_tool=yes + fi + rm -f conftest*]) +if test "x$lt_cv_path_mainfest_tool" != xyes; then + MANIFEST_TOOL=: +fi +_LT_DECL([], [MANIFEST_TOOL], [1], [Manifest tool])dnl +])# _LT_PATH_MANIFEST_TOOL + + +# LT_LIB_M +# -------- +# check for math library +AC_DEFUN([LT_LIB_M], +[AC_REQUIRE([AC_CANONICAL_HOST])dnl +LIBM= +case $host in +*-*-beos* | *-*-cegcc* | *-*-cygwin* | *-*-haiku* | *-*-pw32* | *-*-darwin*) + # These system don't have libm, or don't need it + ;; +*-ncr-sysv4.3*) + AC_CHECK_LIB(mw, _mwvalidcheckl, LIBM="-lmw") + AC_CHECK_LIB(m, cos, LIBM="$LIBM -lm") + ;; +*) + AC_CHECK_LIB(m, cos, LIBM="-lm") + ;; +esac +AC_SUBST([LIBM]) +])# LT_LIB_M + +# Old name: +AU_ALIAS([AC_CHECK_LIBM], [LT_LIB_M]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_CHECK_LIBM], []) + + +# _LT_COMPILER_NO_RTTI([TAGNAME]) +# ------------------------------- +m4_defun([_LT_COMPILER_NO_RTTI], +[m4_require([_LT_TAG_COMPILER])dnl + +_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)= + +if test "$GCC" = yes; then + case $cc_basename in + nvcc*) + _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -Xcompiler -fno-builtin' ;; + *) + _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -fno-builtin' ;; + esac + + _LT_COMPILER_OPTION([if $compiler supports -fno-rtti -fno-exceptions], + lt_cv_prog_compiler_rtti_exceptions, + [-fno-rtti -fno-exceptions], [], + [_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)="$_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1) -fno-rtti -fno-exceptions"]) +fi +_LT_TAGDECL([no_builtin_flag], [lt_prog_compiler_no_builtin_flag], [1], + [Compiler flag to turn off builtin functions]) +])# _LT_COMPILER_NO_RTTI + + +# _LT_CMD_GLOBAL_SYMBOLS +# ---------------------- +m4_defun([_LT_CMD_GLOBAL_SYMBOLS], +[AC_REQUIRE([AC_CANONICAL_HOST])dnl +AC_REQUIRE([AC_PROG_CC])dnl +AC_REQUIRE([AC_PROG_AWK])dnl 
+AC_REQUIRE([LT_PATH_NM])dnl +AC_REQUIRE([LT_PATH_LD])dnl +m4_require([_LT_DECL_SED])dnl +m4_require([_LT_DECL_EGREP])dnl +m4_require([_LT_TAG_COMPILER])dnl + +# Check for command to grab the raw symbol name followed by C symbol from nm. +AC_MSG_CHECKING([command to parse $NM output from $compiler object]) +AC_CACHE_VAL([lt_cv_sys_global_symbol_pipe], +[ +# These are sane defaults that work on at least a few old systems. +# [They come from Ultrix. What could be older than Ultrix?!! ;)] + +# Character class describing NM global symbol codes. +symcode='[[BCDEGRST]]' + +# Regexp to match symbols that can be accessed directly from C. +sympat='\([[_A-Za-z]][[_A-Za-z0-9]]*\)' + +# Define system-specific variables. +case $host_os in +aix*) + symcode='[[BCDT]]' + ;; +cygwin* | mingw* | pw32* | cegcc*) + symcode='[[ABCDGISTW]]' + ;; +hpux*) + if test "$host_cpu" = ia64; then + symcode='[[ABCDEGRST]]' + fi + ;; +irix* | nonstopux*) + symcode='[[BCDEGRST]]' + ;; +osf*) + symcode='[[BCDEGQRST]]' + ;; +solaris*) + symcode='[[BDRT]]' + ;; +sco3.2v5*) + symcode='[[DT]]' + ;; +sysv4.2uw2*) + symcode='[[DT]]' + ;; +sysv5* | sco5v6* | unixware* | OpenUNIX*) + symcode='[[ABDT]]' + ;; +sysv4) + symcode='[[DFNSTU]]' + ;; +esac + +# If we're using GNU nm, then use its standard symbol codes. +case `$NM -V 2>&1` in +*GNU* | *'with BFD'*) + symcode='[[ABCDGIRSTW]]' ;; +esac + +# Transform an extracted symbol line into a proper C declaration. +# Some systems (esp. on ia64) link data and code symbols differently, +# so use this general approach. 
+lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'" + +# Transform an extracted symbol line into symbol name and symbol address +lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([[^ ]]*\)[[ ]]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([[^ ]]*\) \([[^ ]]*\)$/ {\"\2\", (void *) \&\2},/p'" +lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([[^ ]]*\)[[ ]]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([[^ ]]*\) \(lib[[^ ]]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([[^ ]]*\) \([[^ ]]*\)$/ {\"lib\2\", (void *) \&\2},/p'" + +# Handle CRLF in mingw tool chain +opt_cr= +case $build_os in +mingw*) + opt_cr=`$ECHO 'x\{0,1\}' | tr x '\015'` # option cr in regexp + ;; +esac + +# Try without a prefix underscore, then with it. +for ac_symprfx in "" "_"; do + + # Transform symcode, sympat, and symprfx into a raw symbol and a C symbol. + symxfrm="\\1 $ac_symprfx\\2 \\2" + + # Write the raw and C identifiers. + if test "$lt_cv_nm_interface" = "MS dumpbin"; then + # Fake it for dumpbin and say T for any non-static function + # and D for any global variable. + # Also find C++ and __fastcall symbols from MSVC++, + # which start with @ or ?. + lt_cv_sys_global_symbol_pipe="$AWK ['"\ +" {last_section=section; section=\$ 3};"\ +" /^COFF SYMBOL TABLE/{for(i in hide) delete hide[i]};"\ +" /Section length .*#relocs.*(pick any)/{hide[last_section]=1};"\ +" \$ 0!~/External *\|/{next};"\ +" / 0+ UNDEF /{next}; / UNDEF \([^|]\)*()/{next};"\ +" {if(hide[section]) next};"\ +" {f=0}; \$ 0~/\(\).*\|/{f=1}; {printf f ? 
\"T \" : \"D \"};"\ +" {split(\$ 0, a, /\||\r/); split(a[2], s)};"\ +" s[1]~/^[@?]/{print s[1], s[1]; next};"\ +" s[1]~prfx {split(s[1],t,\"@\"); print t[1], substr(t[1],length(prfx))}"\ +" ' prfx=^$ac_symprfx]" + else + lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[[ ]]\($symcode$symcode*\)[[ ]][[ ]]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'" + fi + lt_cv_sys_global_symbol_pipe="$lt_cv_sys_global_symbol_pipe | sed '/ __gnu_lto/d'" + + # Check to see that the pipe works correctly. + pipe_works=no + + rm -f conftest* + cat > conftest.$ac_ext <<_LT_EOF +#ifdef __cplusplus +extern "C" { +#endif +char nm_test_var; +void nm_test_func(void); +void nm_test_func(void){} +#ifdef __cplusplus +} +#endif +int main(){nm_test_var='a';nm_test_func();return(0);} +_LT_EOF + + if AC_TRY_EVAL(ac_compile); then + # Now try to grab the symbols. + nlist=conftest.nm + if AC_TRY_EVAL(NM conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist) && test -s "$nlist"; then + # Try sorting and uniquifying the output. + if sort "$nlist" | uniq > "$nlist"T; then + mv -f "$nlist"T "$nlist" + else + rm -f "$nlist"T + fi + + # Make sure that we snagged all the symbols we need. + if $GREP ' nm_test_var$' "$nlist" >/dev/null; then + if $GREP ' nm_test_func$' "$nlist" >/dev/null; then + cat <<_LT_EOF > conftest.$ac_ext +/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */ +#if defined(_WIN32) || defined(__CYGWIN__) || defined(_WIN32_WCE) +/* DATA imports from DLLs on WIN32 con't be const, because runtime + relocations are performed -- see ld's documentation on pseudo-relocs. */ +# define LT@&t@_DLSYM_CONST +#elif defined(__osf__) +/* This system does not cope well with relocations in const data. */ +# define LT@&t@_DLSYM_CONST +#else +# define LT@&t@_DLSYM_CONST const +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +_LT_EOF + # Now generate the symbol file. 
+ eval "$lt_cv_sys_global_symbol_to_cdecl"' < "$nlist" | $GREP -v main >> conftest.$ac_ext' + + cat <<_LT_EOF >> conftest.$ac_ext + +/* The mapping between symbol names and symbols. */ +LT@&t@_DLSYM_CONST struct { + const char *name; + void *address; +} +lt__PROGRAM__LTX_preloaded_symbols[[]] = +{ + { "@PROGRAM@", (void *) 0 }, +_LT_EOF + $SED "s/^$symcode$symcode* \(.*\) \(.*\)$/ {\"\2\", (void *) \&\2},/" < "$nlist" | $GREP -v main >> conftest.$ac_ext + cat <<\_LT_EOF >> conftest.$ac_ext + {0, (void *) 0} +}; + +/* This works around a problem in FreeBSD linker */ +#ifdef FREEBSD_WORKAROUND +static const void *lt_preloaded_setup() { + return lt__PROGRAM__LTX_preloaded_symbols; +} +#endif + +#ifdef __cplusplus +} +#endif +_LT_EOF + # Now try linking the two files. + mv conftest.$ac_objext conftstm.$ac_objext + lt_globsym_save_LIBS=$LIBS + lt_globsym_save_CFLAGS=$CFLAGS + LIBS="conftstm.$ac_objext" + CFLAGS="$CFLAGS$_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)" + if AC_TRY_EVAL(ac_link) && test -s conftest${ac_exeext}; then + pipe_works=yes + fi + LIBS=$lt_globsym_save_LIBS + CFLAGS=$lt_globsym_save_CFLAGS + else + echo "cannot find nm_test_func in $nlist" >&AS_MESSAGE_LOG_FD + fi + else + echo "cannot find nm_test_var in $nlist" >&AS_MESSAGE_LOG_FD + fi + else + echo "cannot run $lt_cv_sys_global_symbol_pipe" >&AS_MESSAGE_LOG_FD + fi + else + echo "$progname: failed program was:" >&AS_MESSAGE_LOG_FD + cat conftest.$ac_ext >&5 + fi + rm -rf conftest* conftst* + + # Do not use the global_symbol_pipe unless it works. + if test "$pipe_works" = yes; then + break + else + lt_cv_sys_global_symbol_pipe= + fi +done +]) +if test -z "$lt_cv_sys_global_symbol_pipe"; then + lt_cv_sys_global_symbol_to_cdecl= +fi +if test -z "$lt_cv_sys_global_symbol_pipe$lt_cv_sys_global_symbol_to_cdecl"; then + AC_MSG_RESULT(failed) +else + AC_MSG_RESULT(ok) +fi + +# Response file support. 
+if test "$lt_cv_nm_interface" = "MS dumpbin"; then + nm_file_list_spec='@' +elif $NM --help 2>/dev/null | grep '[[@]]FILE' >/dev/null; then + nm_file_list_spec='@' +fi + +_LT_DECL([global_symbol_pipe], [lt_cv_sys_global_symbol_pipe], [1], + [Take the output of nm and produce a listing of raw symbols and C names]) +_LT_DECL([global_symbol_to_cdecl], [lt_cv_sys_global_symbol_to_cdecl], [1], + [Transform the output of nm in a proper C declaration]) +_LT_DECL([global_symbol_to_c_name_address], + [lt_cv_sys_global_symbol_to_c_name_address], [1], + [Transform the output of nm in a C name address pair]) +_LT_DECL([global_symbol_to_c_name_address_lib_prefix], + [lt_cv_sys_global_symbol_to_c_name_address_lib_prefix], [1], + [Transform the output of nm in a C name address pair when lib prefix is needed]) +_LT_DECL([], [nm_file_list_spec], [1], + [Specify filename containing input files for $NM]) +]) # _LT_CMD_GLOBAL_SYMBOLS + + +# _LT_COMPILER_PIC([TAGNAME]) +# --------------------------- +m4_defun([_LT_COMPILER_PIC], +[m4_require([_LT_TAG_COMPILER])dnl +_LT_TAGVAR(lt_prog_compiler_wl, $1)= +_LT_TAGVAR(lt_prog_compiler_pic, $1)= +_LT_TAGVAR(lt_prog_compiler_static, $1)= + +m4_if([$1], [CXX], [ + # C++ specific cases for pic, static, wl, etc. + if test "$GXX" = yes; then + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' + + case $host_os in + aix*) + # All AIX code is PIC. + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + fi + ;; + + amigaos*) + case $host_cpu in + powerpc) + # see comment about AmigaOS4 .so support + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + m68k) + # FIXME: we need at least 68020 code to build shared libraries, but + # adding the `-m68020' flag to GCC prevents building anything better, + # like `-m68040'. 
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-m68020 -resident32 -malways-restore-a4' + ;; + esac + ;; + + beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) + # PIC is the default for these OSes. + ;; + mingw* | cygwin* | os2* | pw32* | cegcc*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + # Although the cygwin gcc ignores -fPIC, still need this for old-style + # (--disable-auto-import) libraries + m4_if([$1], [GCJ], [], + [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) + ;; + darwin* | rhapsody*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common' + ;; + *djgpp*) + # DJGPP does not support shared libraries at all + _LT_TAGVAR(lt_prog_compiler_pic, $1)= + ;; + haiku*) + # PIC is the default for Haiku. + # The "-static" flag exists, but is broken. + _LT_TAGVAR(lt_prog_compiler_static, $1)= + ;; + interix[[3-9]]*) + # Interix 3.x gcc -fpic/-fPIC options generate broken code. + # Instead, we relocate shared libraries at runtime. + ;; + sysv4*MP*) + if test -d /usr/nec; then + _LT_TAGVAR(lt_prog_compiler_pic, $1)=-Kconform_pic + fi + ;; + hpux*) + # PIC is the default for 64-bit PA HP-UX, but not for 32-bit + # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag + # sets the default TLS model and affects inlining. + case $host_cpu in + hppa*64*) + ;; + *) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + esac + ;; + *qnx* | *nto*) + # QNX uses GNU C++, but need to define -shared option too, otherwise + # it will coredump. + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' + ;; + *) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + esac + else + case $host_os in + aix[[4-9]]*) + # All AIX code is PIC. 
+ if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + else + _LT_TAGVAR(lt_prog_compiler_static, $1)='-bnso -bI:/lib/syscalls.exp' + fi + ;; + chorus*) + case $cc_basename in + cxch68*) + # Green Hills C++ Compiler + # _LT_TAGVAR(lt_prog_compiler_static, $1)="--no_auto_instantiation -u __main -u __premain -u _abort -r $COOL_DIR/lib/libOrb.a $MVME_DIR/lib/CC/libC.a $MVME_DIR/lib/classix/libcx.s.a" + ;; + esac + ;; + mingw* | cygwin* | os2* | pw32* | cegcc*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + m4_if([$1], [GCJ], [], + [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) + ;; + dgux*) + case $cc_basename in + ec++*) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + ;; + ghcx*) + # Green Hills C++ Compiler + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' + ;; + *) + ;; + esac + ;; + freebsd* | dragonfly*) + # FreeBSD uses GNU C++ + ;; + hpux9* | hpux10* | hpux11*) + case $cc_basename in + CC*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_static, $1)='${wl}-a ${wl}archive' + if test "$host_cpu" != ia64; then + _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z' + fi + ;; + aCC*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_static, $1)='${wl}-a ${wl}archive' + case $host_cpu in + hppa*64*|ia64*) + # +Z the default + ;; + *) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z' + ;; + esac + ;; + *) + ;; + esac + ;; + interix*) + # This is c89, which is MS Visual C++ (no shared libs) + # Anyone wants to do a port? + ;; + irix5* | irix6* | nonstopux*) + case $cc_basename in + CC*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + # CC pic flag -KPIC is the default. 
+ ;; + *) + ;; + esac + ;; + linux* | k*bsd*-gnu | kopensolaris*-gnu) + case $cc_basename in + KCC*) + # KAI C++ Compiler + _LT_TAGVAR(lt_prog_compiler_wl, $1)='--backend -Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + ecpc* ) + # old Intel C++ for x86_64 which still supported -KPIC. + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' + ;; + icpc* ) + # Intel C++, used to be incompatible with GCC. + # ICC 10 doesn't accept -KPIC any more. + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' + ;; + pgCC* | pgcpp*) + # Portland Group C++ compiler + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fpic' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + cxx*) + # Compaq C++ + # Make sure the PIC flag is empty. It appears that all Alpha + # Linux and Compaq Tru64 Unix objects are PIC. + _LT_TAGVAR(lt_prog_compiler_pic, $1)= + _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + ;; + xlc* | xlC* | bgxl[[cC]]* | mpixl[[cC]]*) + # IBM XL 8.0, 9.0 on PPC and BlueGene + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-qpic' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-qstaticlink' + ;; + *) + case `$CC -V 2>&1 | sed 5q` in + *Sun\ C*) + # Sun C++ 5.9 + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' + ;; + esac + ;; + esac + ;; + lynxos*) + ;; + m88k*) + ;; + mvs*) + case $cc_basename in + cxx*) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-W c,exportall' + ;; + *) + ;; + esac + ;; + netbsd*) + ;; + *qnx* | *nto*) + # QNX uses GNU C++, but need to define -shared option too, otherwise + # it will coredump. 
+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' + ;; + osf3* | osf4* | osf5*) + case $cc_basename in + KCC*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='--backend -Wl,' + ;; + RCC*) + # Rational C++ 2.4.1 + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' + ;; + cxx*) + # Digital/Compaq C++ + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + # Make sure the PIC flag is empty. It appears that all Alpha + # Linux and Compaq Tru64 Unix objects are PIC. + _LT_TAGVAR(lt_prog_compiler_pic, $1)= + _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + ;; + *) + ;; + esac + ;; + psos*) + ;; + solaris*) + case $cc_basename in + CC* | sunCC*) + # Sun C++ 4.2, 5.x and Centerline C++ + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' + ;; + gcx*) + # Green Hills C++ Compiler + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' + ;; + *) + ;; + esac + ;; + sunos4*) + case $cc_basename in + CC*) + # Sun C++ 4.x + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + lcc*) + # Lucid + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' + ;; + *) + ;; + esac + ;; + sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) + case $cc_basename in + CC*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + esac + ;; + tandem*) + case $cc_basename in + NCC*) + # NonStop-UX NCC 3.20 + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + ;; + *) + ;; + esac + ;; + vxworks*) + ;; + *) + _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no + ;; + esac + fi +], +[ + if test "$GCC" = yes; then + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' + + case $host_os in + aix*) + # All AIX code is PIC. 
+ if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + fi + ;; + + amigaos*) + case $host_cpu in + powerpc) + # see comment about AmigaOS4 .so support + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + m68k) + # FIXME: we need at least 68020 code to build shared libraries, but + # adding the `-m68020' flag to GCC prevents building anything better, + # like `-m68040'. + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-m68020 -resident32 -malways-restore-a4' + ;; + esac + ;; + + beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) + # PIC is the default for these OSes. + ;; + + mingw* | cygwin* | pw32* | os2* | cegcc*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + # Although the cygwin gcc ignores -fPIC, still need this for old-style + # (--disable-auto-import) libraries + m4_if([$1], [GCJ], [], + [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) + ;; + + darwin* | rhapsody*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common' + ;; + + haiku*) + # PIC is the default for Haiku. + # The "-static" flag exists, but is broken. + _LT_TAGVAR(lt_prog_compiler_static, $1)= + ;; + + hpux*) + # PIC is the default for 64-bit PA HP-UX, but not for 32-bit + # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag + # sets the default TLS model and affects inlining. + case $host_cpu in + hppa*64*) + # +Z the default + ;; + *) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + esac + ;; + + interix[[3-9]]*) + # Interix 3.x gcc -fpic/-fPIC options generate broken code. + # Instead, we relocate shared libraries at runtime. + ;; + + msdosdjgpp*) + # Just because we use GCC doesn't mean we suddenly get shared libraries + # on systems that don't support them. 
+ _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no + enable_shared=no + ;; + + *nto* | *qnx*) + # QNX uses GNU C++, but need to define -shared option too, otherwise + # it will coredump. + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' + ;; + + sysv4*MP*) + if test -d /usr/nec; then + _LT_TAGVAR(lt_prog_compiler_pic, $1)=-Kconform_pic + fi + ;; + + *) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + esac + + case $cc_basename in + nvcc*) # Cuda Compiler Driver 2.2 + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Xlinker ' + if test -n "$_LT_TAGVAR(lt_prog_compiler_pic, $1)"; then + _LT_TAGVAR(lt_prog_compiler_pic, $1)="-Xcompiler $_LT_TAGVAR(lt_prog_compiler_pic, $1)" + fi + ;; + esac + else + # PORTME Check for flag to pass linker flags through the system compiler. + case $host_os in + aix*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + else + _LT_TAGVAR(lt_prog_compiler_static, $1)='-bnso -bI:/lib/syscalls.exp' + fi + ;; + + mingw* | cygwin* | pw32* | os2* | cegcc*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + m4_if([$1], [GCJ], [], + [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) + ;; + + hpux9* | hpux10* | hpux11*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but + # not for PA HP-UX. + case $host_cpu in + hppa*64*|ia64*) + # +Z the default + ;; + *) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z' + ;; + esac + # Is there a better lt_prog_compiler_static that works with the bundled CC? + _LT_TAGVAR(lt_prog_compiler_static, $1)='${wl}-a ${wl}archive' + ;; + + irix5* | irix6* | nonstopux*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + # PIC (with -KPIC) is the default. 
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + ;; + + linux* | k*bsd*-gnu | kopensolaris*-gnu) + case $cc_basename in + # old Intel for x86_64 which still supported -KPIC. + ecc*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' + ;; + # icc used to be incompatible with GCC. + # ICC 10 doesn't accept -KPIC any more. + icc* | ifort*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' + ;; + # Lahey Fortran 8.1. + lf95*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='--shared' + _LT_TAGVAR(lt_prog_compiler_static, $1)='--static' + ;; + nagfor*) + # NAG Fortran compiler + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,-Wl,,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*) + # Portland Group compilers (*not* the Pentium gcc compiler, + # which looks to be a dead project) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fpic' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + ccc*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + # All Alpha code is PIC. 
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + ;; + xl* | bgxl* | bgf* | mpixl*) + # IBM XL C 8.0/Fortran 10.1, 11.1 on PPC and BlueGene + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-qpic' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-qstaticlink' + ;; + *) + case `$CC -V 2>&1 | sed 5q` in + *Sun\ Ceres\ Fortran* | *Sun*Fortran*\ [[1-7]].* | *Sun*Fortran*\ 8.[[0-3]]*) + # Sun Fortran 8.3 passes all unrecognized flags to the linker + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + _LT_TAGVAR(lt_prog_compiler_wl, $1)='' + ;; + *Sun\ F* | *Sun*Fortran*) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' + ;; + *Sun\ C*) + # Sun C 5.9 + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + ;; + *Intel*\ [[CF]]*Compiler*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' + ;; + *Portland\ Group*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fpic' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + esac + ;; + esac + ;; + + newsos6) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + + *nto* | *qnx*) + # QNX uses GNU C++, but need to define -shared option too, otherwise + # it will coredump. + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' + ;; + + osf3* | osf4* | osf5*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + # All OSF/1 code is PIC. 
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + ;; + + rdos*) + _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + ;; + + solaris*) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + case $cc_basename in + f77* | f90* | f95* | sunf77* | sunf90* | sunf95*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ';; + *) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,';; + esac + ;; + + sunos4*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + + sysv4 | sysv4.2uw2* | sysv4.3*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + + sysv4*MP*) + if test -d /usr/nec ;then + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-Kconform_pic' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + fi + ;; + + sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + + unicos*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no + ;; + + uts4*) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + + *) + _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no + ;; + esac + fi +]) +case $host_os in + # For platforms which do not support PIC, -DPIC is meaningless: + *djgpp*) + _LT_TAGVAR(lt_prog_compiler_pic, $1)= + ;; + *) + _LT_TAGVAR(lt_prog_compiler_pic, $1)="$_LT_TAGVAR(lt_prog_compiler_pic, $1)@&t@m4_if([$1],[],[ -DPIC],[m4_if([$1],[CXX],[ -DPIC],[])])" + ;; +esac + +AC_CACHE_CHECK([for $compiler option to produce PIC], + [_LT_TAGVAR(lt_cv_prog_compiler_pic, $1)], + [_LT_TAGVAR(lt_cv_prog_compiler_pic, $1)=$_LT_TAGVAR(lt_prog_compiler_pic, $1)]) +_LT_TAGVAR(lt_prog_compiler_pic, 
$1)=$_LT_TAGVAR(lt_cv_prog_compiler_pic, $1) + +# +# Check to make sure the PIC flag actually works. +# +if test -n "$_LT_TAGVAR(lt_prog_compiler_pic, $1)"; then + _LT_COMPILER_OPTION([if $compiler PIC flag $_LT_TAGVAR(lt_prog_compiler_pic, $1) works], + [_LT_TAGVAR(lt_cv_prog_compiler_pic_works, $1)], + [$_LT_TAGVAR(lt_prog_compiler_pic, $1)@&t@m4_if([$1],[],[ -DPIC],[m4_if([$1],[CXX],[ -DPIC],[])])], [], + [case $_LT_TAGVAR(lt_prog_compiler_pic, $1) in + "" | " "*) ;; + *) _LT_TAGVAR(lt_prog_compiler_pic, $1)=" $_LT_TAGVAR(lt_prog_compiler_pic, $1)" ;; + esac], + [_LT_TAGVAR(lt_prog_compiler_pic, $1)= + _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no]) +fi +_LT_TAGDECL([pic_flag], [lt_prog_compiler_pic], [1], + [Additional compiler flags for building library objects]) + +_LT_TAGDECL([wl], [lt_prog_compiler_wl], [1], + [How to pass a linker flag through the compiler]) +# +# Check to make sure the static flag actually works. +# +wl=$_LT_TAGVAR(lt_prog_compiler_wl, $1) eval lt_tmp_static_flag=\"$_LT_TAGVAR(lt_prog_compiler_static, $1)\" +_LT_LINKER_OPTION([if $compiler static flag $lt_tmp_static_flag works], + _LT_TAGVAR(lt_cv_prog_compiler_static_works, $1), + $lt_tmp_static_flag, + [], + [_LT_TAGVAR(lt_prog_compiler_static, $1)=]) +_LT_TAGDECL([link_static_flag], [lt_prog_compiler_static], [1], + [Compiler flag to prevent dynamic linking]) +])# _LT_COMPILER_PIC + + +# _LT_LINKER_SHLIBS([TAGNAME]) +# ---------------------------- +# See if the linker supports building shared libraries. 
+m4_defun([_LT_LINKER_SHLIBS], +[AC_REQUIRE([LT_PATH_LD])dnl +AC_REQUIRE([LT_PATH_NM])dnl +m4_require([_LT_PATH_MANIFEST_TOOL])dnl +m4_require([_LT_FILEUTILS_DEFAULTS])dnl +m4_require([_LT_DECL_EGREP])dnl +m4_require([_LT_DECL_SED])dnl +m4_require([_LT_CMD_GLOBAL_SYMBOLS])dnl +m4_require([_LT_TAG_COMPILER])dnl +AC_MSG_CHECKING([whether the $compiler linker ($LD) supports shared libraries]) +m4_if([$1], [CXX], [ + _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + _LT_TAGVAR(exclude_expsyms, $1)=['_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*'] + case $host_os in + aix[[4-9]]*) + # If we're using GNU nm, then we don't want the "-C" option. + # -C means demangle to AIX nm, but means don't demangle with GNU nm + # Also, AIX nm treats weak defined symbols like other global defined + # symbols, whereas GNU nm marks them as "W". + if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then + _LT_TAGVAR(export_symbols_cmds, $1)='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && ([substr](\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' + else + _LT_TAGVAR(export_symbols_cmds, $1)='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && ([substr](\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' + fi + ;; + pw32*) + _LT_TAGVAR(export_symbols_cmds, $1)="$ltdll_cmds" + ;; + cygwin* | mingw* | cegcc*) + case $cc_basename in + cl*) + _LT_TAGVAR(exclude_expsyms, $1)='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*' + ;; + *) + _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1 DATA/;s/^.*[[ ]]__nm__\([[^ ]]*\)[[ ]][[^ ]]*/\1 DATA/;/^I[[ ]]/d;/^[[AITW]][[ ]]/s/.* //'\'' | sort | uniq > $export_symbols' + _LT_TAGVAR(exclude_expsyms, 
$1)=['[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname'] + ;; + esac + ;; + *) + _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + ;; + esac +], [ + runpath_var= + _LT_TAGVAR(allow_undefined_flag, $1)= + _LT_TAGVAR(always_export_symbols, $1)=no + _LT_TAGVAR(archive_cmds, $1)= + _LT_TAGVAR(archive_expsym_cmds, $1)= + _LT_TAGVAR(compiler_needs_object, $1)=no + _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no + _LT_TAGVAR(export_dynamic_flag_spec, $1)= + _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + _LT_TAGVAR(hardcode_automatic, $1)=no + _LT_TAGVAR(hardcode_direct, $1)=no + _LT_TAGVAR(hardcode_direct_absolute, $1)=no + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)= + _LT_TAGVAR(hardcode_libdir_separator, $1)= + _LT_TAGVAR(hardcode_minus_L, $1)=no + _LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported + _LT_TAGVAR(inherit_rpath, $1)=no + _LT_TAGVAR(link_all_deplibs, $1)=unknown + _LT_TAGVAR(module_cmds, $1)= + _LT_TAGVAR(module_expsym_cmds, $1)= + _LT_TAGVAR(old_archive_from_new_cmds, $1)= + _LT_TAGVAR(old_archive_from_expsyms_cmds, $1)= + _LT_TAGVAR(thread_safe_flag_spec, $1)= + _LT_TAGVAR(whole_archive_flag_spec, $1)= + # include_expsyms should be a list of space-separated symbols to be *always* + # included in the symbol list + _LT_TAGVAR(include_expsyms, $1)= + # exclude_expsyms can be an extended regexp of symbols to exclude + # it will be wrapped by ` (' and `)$', so one must not match beginning or + # end of line. Example: `a|bc|.*d.*' will exclude the symbols `a' and `bc', + # as well as any symbol that contains `d'. 
+ _LT_TAGVAR(exclude_expsyms, $1)=['_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*'] + # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out + # platforms (ab)use it in PIC code, but their linkers get confused if + # the symbol is explicitly referenced. Since portable code cannot + # rely on this symbol name, it's probably fine to never include it in + # preloaded symbol tables. + # Exclude shared library initialization/finalization symbols. +dnl Note also adjust exclude_expsyms for C++ above. + extract_expsyms_cmds= + + case $host_os in + cygwin* | mingw* | pw32* | cegcc*) + # FIXME: the MSVC++ port hasn't been tested in a loooong time + # When not using gcc, we currently assume that we are using + # Microsoft Visual C++. + if test "$GCC" != yes; then + with_gnu_ld=no + fi + ;; + interix*) + # we just hope/assume this is gcc and not c89 (= MSVC++) + with_gnu_ld=yes + ;; + openbsd*) + with_gnu_ld=no + ;; + esac + + _LT_TAGVAR(ld_shlibs, $1)=yes + + # On some targets, GNU ld is compatible enough with the native linker + # that we're better off using the native interface for both. + lt_use_gnu_ld_interface=no + if test "$with_gnu_ld" = yes; then + case $host_os in + aix*) + # The AIX port of GNU ld has always aspired to compatibility + # with the native linker. However, as the warning in the GNU ld + # block says, versions before 2.19.5* couldn't really create working + # shared libraries, regardless of the interface used. + case `$LD -v 2>&1` in + *\ \(GNU\ Binutils\)\ 2.19.5*) ;; + *\ \(GNU\ Binutils\)\ 2.[[2-9]]*) ;; + *\ \(GNU\ Binutils\)\ [[3-9]]*) ;; + *) + lt_use_gnu_ld_interface=yes + ;; + esac + ;; + *) + lt_use_gnu_ld_interface=yes + ;; + esac + fi + + if test "$lt_use_gnu_ld_interface" = yes; then + # If archive_cmds runs LD, not CC, wlarc should be empty + wlarc='${wl}' + + # Set some defaults for GNU ld with shared library support. These + # are reset later if shared libraries are not supported. 
Putting them + # here allows them to be overridden if necessary. + runpath_var=LD_RUN_PATH + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' + # ancient GNU ld didn't support --whole-archive et. al. + if $LD --help 2>&1 | $GREP 'no-whole-archive' > /dev/null; then + _LT_TAGVAR(whole_archive_flag_spec, $1)="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' + else + _LT_TAGVAR(whole_archive_flag_spec, $1)= + fi + supports_anon_versioning=no + case `$LD -v 2>&1` in + *GNU\ gold*) supports_anon_versioning=yes ;; + *\ [[01]].* | *\ 2.[[0-9]].* | *\ 2.10.*) ;; # catch versions < 2.11 + *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... + *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ... + *\ 2.11.*) ;; # other 2.11 versions + *) supports_anon_versioning=yes ;; + esac + + # See if GNU ld supports shared libraries. + case $host_os in + aix[[3-9]]*) + # On AIX/PPC, the GNU linker is very broken + if test "$host_cpu" != ia64; then + _LT_TAGVAR(ld_shlibs, $1)=no + cat <<_LT_EOF 1>&2 + +*** Warning: the GNU linker, at least up to release 2.19, is reported +*** to be unable to reliably create shared libraries on AIX. +*** Therefore, libtool is disabling shared libraries support. If you +*** really care for shared libraries, you may want to install binutils +*** 2.20 or above, or modify your PATH so that a non-GNU linker is found. +*** You will then need to restart the configuration process. 
+ +_LT_EOF + fi + ;; + + amigaos*) + case $host_cpu in + powerpc) + # see comment about AmigaOS4 .so support + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='' + ;; + m68k) + _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(hardcode_minus_L, $1)=yes + ;; + esac + ;; + + beos*) + if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + _LT_TAGVAR(allow_undefined_flag, $1)=unsupported + # Joseph Beckenbach says some releases of gcc + # support --undefined. This deserves some investigation. FIXME + _LT_TAGVAR(archive_cmds, $1)='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + + cygwin* | mingw* | pw32* | cegcc*) + # _LT_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless, + # as there is no search path for DLLs. 
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-all-symbols' + _LT_TAGVAR(allow_undefined_flag, $1)=unsupported + _LT_TAGVAR(always_export_symbols, $1)=no + _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes + _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1 DATA/;s/^.*[[ ]]__nm__\([[^ ]]*\)[[ ]][[^ ]]*/\1 DATA/;/^I[[ ]]/d;/^[[AITW]][[ ]]/s/.* //'\'' | sort | uniq > $export_symbols' + _LT_TAGVAR(exclude_expsyms, $1)=['[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname'] + + if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + # If the export-symbols file already is a .def file (1st line + # is EXPORTS), use it as is; otherwise, prepend... + _LT_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then + cp $export_symbols $output_objdir/$soname.def; + else + echo EXPORTS > $output_objdir/$soname.def; + cat $export_symbols >> $output_objdir/$soname.def; + fi~ + $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + + haiku*) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(link_all_deplibs, $1)=yes + ;; + + interix[[3-9]]*) + _LT_TAGVAR(hardcode_direct, $1)=no + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. 
+ # Instead, shared libraries are loaded at an image base (0x10000000 by + # default) and relocated if they conflict, which is a slow very memory + # consuming and fragmenting process. To avoid this, we pick a random, + # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link + # time. Moving up from 0x10000000 also allows more sbrk(2) space. + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + ;; + + gnu* | linux* | tpf* | k*bsd*-gnu | kopensolaris*-gnu) + tmp_diet=no + if test "$host_os" = linux-dietlibc; then + case $cc_basename in + diet\ *) tmp_diet=yes;; # linux-dietlibc with static linking (!diet-dyn) + esac + fi + if $LD --help 2>&1 | $EGREP ': supported targets:.* elf' > /dev/null \ + && test "$tmp_diet" = no + then + tmp_addflag=' $pic_flag' + tmp_sharedflag='-shared' + case $cc_basename,$host_cpu in + pgcc*) # Portland Group C compiler + _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' + tmp_addflag=' $pic_flag' + ;; + pgf77* | pgf90* | pgf95* | pgfortran*) + # Portland Group f77 and f90 compilers + _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' + tmp_addflag=' $pic_flag -Mnomain' ;; + ecc*,ia64* | icc*,ia64*) # Intel C compiler on ia64 + tmp_addflag=' 
-i_dynamic' ;; + efc*,ia64* | ifort*,ia64*) # Intel Fortran compiler on ia64 + tmp_addflag=' -i_dynamic -nofor_main' ;; + ifc* | ifort*) # Intel Fortran compiler + tmp_addflag=' -nofor_main' ;; + lf95*) # Lahey Fortran 8.1 + _LT_TAGVAR(whole_archive_flag_spec, $1)= + tmp_sharedflag='--shared' ;; + xl[[cC]]* | bgxl[[cC]]* | mpixl[[cC]]*) # IBM XL C 8.0 on PPC (deal with xlf below) + tmp_sharedflag='-qmkshrobj' + tmp_addflag= ;; + nvcc*) # Cuda Compiler Driver 2.2 + _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' + _LT_TAGVAR(compiler_needs_object, $1)=yes + ;; + esac + case `$CC -V 2>&1 | sed 5q` in + *Sun\ C*) # Sun C 5.9 + _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' + _LT_TAGVAR(compiler_needs_object, $1)=yes + tmp_sharedflag='-G' ;; + *Sun\ F*) # Sun Fortran 8.3 + tmp_sharedflag='-G' ;; + esac + _LT_TAGVAR(archive_cmds, $1)='$CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + + if test "x$supports_anon_versioning" = xyes; then + _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + echo "local: *; };" >> $output_objdir/$libname.ver~ + $CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' + fi + + case $cc_basename in + xlf* | bgf* | bgxlf* | mpixlf*) + # IBM XL Fortran 10.1 on PPC cannot create shared libs itself + _LT_TAGVAR(whole_archive_flag_spec, $1)='--whole-archive$convenience --no-whole-archive' + 
_LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_TAGVAR(archive_cmds, $1)='$LD -shared $libobjs $deplibs $linker_flags -soname $soname -o $lib' + if test "x$supports_anon_versioning" = xyes; then + _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + echo "local: *; };" >> $output_objdir/$libname.ver~ + $LD -shared $libobjs $deplibs $linker_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' + fi + ;; + esac + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + + netbsd*) + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' + wlarc= + else + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + fi + ;; + + solaris*) + if $LD -v 2>&1 | $GREP 'BFD 2\.8' > /dev/null; then + _LT_TAGVAR(ld_shlibs, $1)=no + cat <<_LT_EOF 1>&2 + +*** Warning: The releases 2.8.* of the GNU linker cannot reliably +*** create shared libraries on Solaris systems. Therefore, libtool +*** is disabling shared libraries support. We urge you to upgrade GNU +*** binutils to release 2.9.1 or newer. Another option is to modify +*** your PATH or compiler configuration so that the native linker is +*** used, and then restart. 
+ +_LT_EOF + elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + + sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*) + case `$LD -v 2>&1` in + *\ [[01]].* | *\ 2.[[0-9]].* | *\ 2.1[[0-5]].*) + _LT_TAGVAR(ld_shlibs, $1)=no + cat <<_LT_EOF 1>&2 + +*** Warning: Releases of the GNU linker prior to 2.16.91.0.3 can not +*** reliably create shared libraries on SCO systems. Therefore, libtool +*** is disabling shared libraries support. We urge you to upgrade GNU +*** binutils to release 2.16.91.0.3 or newer. Another option is to modify +*** your PATH or compiler configuration so that the native linker is +*** used, and then restart. + +_LT_EOF + ;; + *) + # For security reasons, it is highly recommended that you always + # use absolute paths for naming shared libraries, and exclude the + # DT_RUNPATH tag from executables and libraries. But doing so + # requires that you compile everything twice, which is a pain. 
+ if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + esac + ;; + + sunos4*) + _LT_TAGVAR(archive_cmds, $1)='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags' + wlarc= + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + *) + if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + esac + + if test "$_LT_TAGVAR(ld_shlibs, $1)" = no; then + runpath_var= + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)= + _LT_TAGVAR(export_dynamic_flag_spec, $1)= + _LT_TAGVAR(whole_archive_flag_spec, $1)= + fi + else + # PORTME fill in a description of your system's linker (not GNU ld) + case $host_os in + aix3*) + _LT_TAGVAR(allow_undefined_flag, $1)=unsupported + _LT_TAGVAR(always_export_symbols, $1)=yes + _LT_TAGVAR(archive_expsym_cmds, $1)='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname' + # Note: this linker hardcodes the directories in LIBPATH if there + # are no directories specified by -L. 
+ _LT_TAGVAR(hardcode_minus_L, $1)=yes + if test "$GCC" = yes && test -z "$lt_prog_compiler_static"; then + # Neither direct hardcoding nor static linking is supported with a + # broken collect2. + _LT_TAGVAR(hardcode_direct, $1)=unsupported + fi + ;; + + aix[[4-9]]*) + if test "$host_cpu" = ia64; then + # On IA64, the linker does run time linking by default, so we don't + # have to do anything special. + aix_use_runtimelinking=no + exp_sym_flag='-Bexport' + no_entry_flag="" + else + # If we're using GNU nm, then we don't want the "-C" option. + # -C means demangle to AIX nm, but means don't demangle with GNU nm + # Also, AIX nm treats weak defined symbols like other global + # defined symbols, whereas GNU nm marks them as "W". + if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then + _LT_TAGVAR(export_symbols_cmds, $1)='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && ([substr](\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' + else + _LT_TAGVAR(export_symbols_cmds, $1)='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && ([substr](\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' + fi + aix_use_runtimelinking=no + + # Test if we are trying to use run time linking or normal + # AIX style linking. If -brtl is somewhere in LDFLAGS, we + # need to do runtime linking. + case $host_os in aix4.[[23]]|aix4.[[23]].*|aix[[5-9]]*) + for ld_flag in $LDFLAGS; do + if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then + aix_use_runtimelinking=yes + break + fi + done + ;; + esac + + exp_sym_flag='-bexport' + no_entry_flag='-bnoentry' + fi + + # When large executables or shared objects are built, AIX ld can + # have problems creating the table of contents. If linking a library + # or program results in "error TOC overflow" add -mminimal-toc to + # CXXFLAGS/CFLAGS for g++/gcc. 
In the cases where that is not + # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. + + _LT_TAGVAR(archive_cmds, $1)='' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_direct_absolute, $1)=yes + _LT_TAGVAR(hardcode_libdir_separator, $1)=':' + _LT_TAGVAR(link_all_deplibs, $1)=yes + _LT_TAGVAR(file_list_spec, $1)='${wl}-f,' + + if test "$GCC" = yes; then + case $host_os in aix4.[[012]]|aix4.[[012]].*) + # We only want to do this on AIX 4.2 and lower, the check + # below for broken collect2 doesn't work under 4.3+ + collect2name=`${CC} -print-prog-name=collect2` + if test -f "$collect2name" && + strings "$collect2name" | $GREP resolve_lib_name >/dev/null + then + # We have reworked collect2 + : + else + # We have old collect2 + _LT_TAGVAR(hardcode_direct, $1)=unsupported + # It fails to find uninstalled libraries when the uninstalled + # path is not listed in the libpath. Setting hardcode_minus_L + # to unsupported forces relinking + _LT_TAGVAR(hardcode_minus_L, $1)=yes + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)= + fi + ;; + esac + shared_flag='-shared' + if test "$aix_use_runtimelinking" = yes; then + shared_flag="$shared_flag "'${wl}-G' + fi + else + # not using gcc + if test "$host_cpu" = ia64; then + # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release + # chokes on -Wl,-G. The following line is correct: + shared_flag='-G' + else + if test "$aix_use_runtimelinking" = yes; then + shared_flag='${wl}-G' + else + shared_flag='${wl}-bM:SRE' + fi + fi + fi + + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-bexpall' + # It seems that -bexpall does not export symbols beginning with + # underscore (_), so it is better to generate a list of symbols to export. + _LT_TAGVAR(always_export_symbols, $1)=yes + if test "$aix_use_runtimelinking" = yes; then + # Warning - without using the other runtime loading flags (-brtl), + # -berok will link without error, but may produce a broken library. 
+ _LT_TAGVAR(allow_undefined_flag, $1)='-berok' + # Determine the default libpath from the value encoded in an + # empty executable. + _LT_SYS_MODULE_PATH_AIX([$1]) + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then func_echo_all "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" + else + if test "$host_cpu" = ia64; then + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R $libdir:/usr/lib:/lib' + _LT_TAGVAR(allow_undefined_flag, $1)="-z nodefs" + _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols" + else + # Determine the default libpath from the value encoded in an + # empty executable. + _LT_SYS_MODULE_PATH_AIX([$1]) + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" + # Warning - without using the other run time loading flags, + # -berok will link without error, but may produce a broken library. + _LT_TAGVAR(no_undefined_flag, $1)=' ${wl}-bernotok' + _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-berok' + if test "$with_gnu_ld" = yes; then + # We only use this code for GNU lds that support --whole-archive. + _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive$convenience ${wl}--no-whole-archive' + else + # Exported symbols can be pulled into shared objects from archives + _LT_TAGVAR(whole_archive_flag_spec, $1)='$convenience' + fi + _LT_TAGVAR(archive_cmds_need_lc, $1)=yes + # This is similar to how AIX traditionally builds its shared libraries. 
+ _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' + fi + fi + ;; + + amigaos*) + case $host_cpu in + powerpc) + # see comment about AmigaOS4 .so support + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='' + ;; + m68k) + _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(hardcode_minus_L, $1)=yes + ;; + esac + ;; + + bsdi[[45]]*) + _LT_TAGVAR(export_dynamic_flag_spec, $1)=-rdynamic + ;; + + cygwin* | mingw* | pw32* | cegcc*) + # When not using gcc, we currently assume that we are using + # Microsoft Visual C++. + # hardcode_libdir_flag_spec is actually meaningless, as there is + # no search path for DLLs. + case $cc_basename in + cl*) + # Native MSVC + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' ' + _LT_TAGVAR(allow_undefined_flag, $1)=unsupported + _LT_TAGVAR(always_export_symbols, $1)=yes + _LT_TAGVAR(file_list_spec, $1)='@' + # Tell ltmain to make .lib files, not .a files. + libext=lib + # Tell ltmain to make .dll files, not .so files. + shrext_cmds=".dll" + # FIXME: Setting linknames here is a bad hack. 
+ _LT_TAGVAR(archive_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-dll~linknames=' + _LT_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then + sed -n -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' -e '1\\\!p' < $export_symbols > $output_objdir/$soname.exp; + else + sed -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' < $export_symbols > $output_objdir/$soname.exp; + fi~ + $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ + linknames=' + # The linker will not automatically build a static lib if we build a DLL. + # _LT_TAGVAR(old_archive_from_new_cmds, $1)='true' + _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes + _LT_TAGVAR(exclude_expsyms, $1)='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*' + _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1,DATA/'\'' | $SED -e '\''/^[[AITW]][[ ]]/s/.*[[ ]]//'\'' | sort | uniq > $export_symbols' + # Don't use ranlib + _LT_TAGVAR(old_postinstall_cmds, $1)='chmod 644 $oldlib' + _LT_TAGVAR(postlink_cmds, $1)='lt_outputfile="@OUTPUT@"~ + lt_tool_outputfile="@TOOL_OUTPUT@"~ + case $lt_outputfile in + *.exe|*.EXE) ;; + *) + lt_outputfile="$lt_outputfile.exe" + lt_tool_outputfile="$lt_tool_outputfile.exe" + ;; + esac~ + if test "$MANIFEST_TOOL" != ":" && test -f "$lt_outputfile.manifest"; then + $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; + $RM "$lt_outputfile.manifest"; + fi' + ;; + *) + # Assume MSVC wrapper + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' ' + _LT_TAGVAR(allow_undefined_flag, $1)=unsupported + # Tell ltmain to make .lib files, not .a files. + libext=lib + # Tell ltmain to make .dll files, not .so files. + shrext_cmds=".dll" + # FIXME: Setting linknames here is a bad hack. 
+ _LT_TAGVAR(archive_cmds, $1)='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' + # The linker will automatically build a .lib file if we build a DLL. + _LT_TAGVAR(old_archive_from_new_cmds, $1)='true' + # FIXME: Should let the user specify the lib program. + _LT_TAGVAR(old_archive_cmds, $1)='lib -OUT:$oldlib$oldobjs$old_deplibs' + _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes + ;; + esac + ;; + + darwin* | rhapsody*) + _LT_DARWIN_LINKER_FEATURES($1) + ;; + + dgux*) + _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor + # support. Future versions do this automatically, but an explicit c++rt0.o + # does not break anything, and helps significantly (at the cost of a little + # extra space). + freebsd2.2*) + _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + # Unfortunately, older versions of FreeBSD 2 do not have this feature. + freebsd2.*) + _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_minus_L, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + # FreeBSD 3 and greater uses gcc -shared to do shared libraries. 
+ freebsd* | dragonfly*) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + hpux9*) + if test "$GCC" = yes; then + _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -shared $pic_flag ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + else + _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + fi + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + _LT_TAGVAR(hardcode_direct, $1)=yes + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. + _LT_TAGVAR(hardcode_minus_L, $1)=yes + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + ;; + + hpux10*) + if test "$GCC" = yes && test "$with_gnu_ld" = no; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + else + _LT_TAGVAR(archive_cmds, $1)='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' + fi + if test "$with_gnu_ld" = no; then + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_direct_absolute, $1)=yes + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. 
+ _LT_TAGVAR(hardcode_minus_L, $1)=yes + fi + ;; + + hpux11*) + if test "$GCC" = yes && test "$with_gnu_ld" = no; then + case $host_cpu in + hppa*64*) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + ia64*) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + else + case $host_cpu in + hppa*64*) + _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + ia64*) + _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + m4_if($1, [], [ + # Older versions of the 11.00 compiler do not understand -b yet + # (HP92453-01 A.11.01.20 doesn't, HP92453-01 B.11.X.35175-35176.GP does) + _LT_LINKER_OPTION([if $CC understands -b], + _LT_TAGVAR(lt_cv_prog_compiler__b, $1), [-b], + [_LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags'], + [_LT_TAGVAR(archive_cmds, $1)='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags'])], + [_LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags']) + ;; + esac + fi + if test "$with_gnu_ld" = no; then + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + + case $host_cpu in + hppa*64*|ia64*) + _LT_TAGVAR(hardcode_direct, $1)=no + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + *) + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_direct_absolute, $1)=yes + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + + # hardcode_minus_L: Not really in the search PATH, + 
# but as the default location of the library. + _LT_TAGVAR(hardcode_minus_L, $1)=yes + ;; + esac + fi + ;; + + irix5* | irix6* | nonstopux*) + if test "$GCC" = yes; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + # Try to use the -exported_symbol ld option, if it does not + # work, assume that -exports_file does not work either and + # implicitly export all symbols. + # This should be the same for all languages, so no per-tag cache variable. + AC_CACHE_CHECK([whether the $host_os linker accepts -exported_symbol], + [lt_cv_irix_exported_symbol], + [save_LDFLAGS="$LDFLAGS" + LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null" + AC_LINK_IFELSE( + [AC_LANG_SOURCE( + [AC_LANG_CASE([C], [[int foo (void) { return 0; }]], + [C++], [[int foo (void) { return 0; }]], + [Fortran 77], [[ + subroutine foo + end]], + [Fortran], [[ + subroutine foo + end]])])], + [lt_cv_irix_exported_symbol=yes], + [lt_cv_irix_exported_symbol=no]) + LDFLAGS="$save_LDFLAGS"]) + if test "$lt_cv_irix_exported_symbol" = yes; then + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib' + fi + else + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry 
${output_objdir}/so_locations -exports_file $export_symbols -o $lib' + fi + _LT_TAGVAR(archive_cmds_need_lc, $1)='no' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + _LT_TAGVAR(inherit_rpath, $1)=yes + _LT_TAGVAR(link_all_deplibs, $1)=yes + ;; + + netbsd*) + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out + else + _LT_TAGVAR(archive_cmds, $1)='$LD -shared -o $lib $libobjs $deplibs $linker_flags' # ELF + fi + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + newsos6) + _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + *nto* | *qnx*) + ;; + + openbsd*) + if test -f /usr/libexec/ld.so; then + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_TAGVAR(hardcode_direct_absolute, $1)=yes + if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-retain-symbols-file,$export_symbols' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + else + case $host_os in + openbsd[[01]].* | openbsd2.[[0-7]] | openbsd2.[[0-7]].*) + _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + ;; + *) + _LT_TAGVAR(archive_cmds, $1)='$CC 
-shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + ;; + esac + fi + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + + os2*) + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(hardcode_minus_L, $1)=yes + _LT_TAGVAR(allow_undefined_flag, $1)=unsupported + _LT_TAGVAR(archive_cmds, $1)='$ECHO "LIBRARY $libname INITINSTANCE" > $output_objdir/$libname.def~$ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~echo DATA >> $output_objdir/$libname.def~echo " SINGLE NONSHARED" >> $output_objdir/$libname.def~echo EXPORTS >> $output_objdir/$libname.def~emxexp $libobjs >> $output_objdir/$libname.def~$CC -Zdll -Zcrtdll -o $lib $libobjs $deplibs $compiler_flags $output_objdir/$libname.def' + _LT_TAGVAR(old_archive_from_new_cmds, $1)='emximp -o $output_objdir/$libname.a $output_objdir/$libname.def' + ;; + + osf3*) + if test "$GCC" = yes; then + _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' + _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + else + _LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' + _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' + fi + _LT_TAGVAR(archive_cmds_need_lc, $1)='no' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + ;; + + osf4* | osf5*) # as osf3* with the addition of -msym flag + if test "$GCC" = yes; then + _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' + _LT_TAGVAR(archive_cmds, $1)='$CC 
-shared${allow_undefined_flag} $pic_flag $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + else + _LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' + _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; printf "%s\\n" "-hidden">> $lib.exp~ + $CC -shared${allow_undefined_flag} ${wl}-input ${wl}$lib.exp $compiler_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib~$RM $lib.exp' + + # Both c and cxx compiler support -rpath directly + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir' + fi + _LT_TAGVAR(archive_cmds_need_lc, $1)='no' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + ;; + + solaris*) + _LT_TAGVAR(no_undefined_flag, $1)=' -z defs' + if test "$GCC" = yes; then + wlarc='${wl}' + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' + else + case `$CC -V 2>&1` in + *"Compilers 5.0"*) + wlarc='' + _LT_TAGVAR(archive_cmds, $1)='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linker_flags' + 
_LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$RM $lib.exp' + ;; + *) + wlarc='${wl}' + _LT_TAGVAR(archive_cmds, $1)='$CC -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' + ;; + esac + fi + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + case $host_os in + solaris2.[[0-5]] | solaris2.[[0-5]].*) ;; + *) + # The compiler driver will combine and reorder linker options, + # but understands `-z linker_flag'. GCC discards it without `$wl', + # but is careful enough not to reorder. + # Supported since Solaris 2.6 (maybe 2.5.1?) + if test "$GCC" = yes; then + _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}-z ${wl}allextract$convenience ${wl}-z ${wl}defaultextract' + else + _LT_TAGVAR(whole_archive_flag_spec, $1)='-z allextract$convenience -z defaultextract' + fi + ;; + esac + _LT_TAGVAR(link_all_deplibs, $1)=yes + ;; + + sunos4*) + if test "x$host_vendor" = xsequent; then + # Use $CC to link under sequent, because it throws in some extra .o + # files that make .init and .fini sections work. 
+ _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h $soname -o $lib $libobjs $deplibs $compiler_flags' + else + _LT_TAGVAR(archive_cmds, $1)='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags' + fi + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_minus_L, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + sysv4) + case $host_vendor in + sni) + _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(hardcode_direct, $1)=yes # is this really true??? + ;; + siemens) + ## LD is ld it makes a PLAMLIB + ## CC just makes a GrossModule. + _LT_TAGVAR(archive_cmds, $1)='$LD -G -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(reload_cmds, $1)='$CC -r -o $output$reload_objs' + _LT_TAGVAR(hardcode_direct, $1)=no + ;; + motorola) + _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(hardcode_direct, $1)=no #Motorola manual says yes, but my tests say they lie + ;; + esac + runpath_var='LD_RUN_PATH' + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + sysv4.3*) + _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_TAGVAR(export_dynamic_flag_spec, $1)='-Bexport' + ;; + + sysv4*MP*) + if test -d /usr/nec; then + _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + runpath_var=LD_RUN_PATH + hardcode_runpath_var=yes + _LT_TAGVAR(ld_shlibs, $1)=yes + fi + ;; + + sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[[01]].[[10]]* | unixware7* | sco3.2v5.0.[[024]]*) + _LT_TAGVAR(no_undefined_flag, $1)='${wl}-z,text' + _LT_TAGVAR(archive_cmds_need_lc, $1)=no + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + runpath_var='LD_RUN_PATH' + + if test "$GCC" = yes; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,$soname -o $lib $libobjs 
$deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + else + _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + fi + ;; + + sysv5* | sco3.2v5* | sco5v6*) + # Note: We can NOT use -z defs as we might desire, because we do not + # link with -lc, and that would cause any symbols used from libc to + # always be unresolved, which means just about no library would + # ever link correctly. If we're not using GNU ld we use -z text + # though, which does catch some bad symbols but isn't as heavy-handed + # as -z defs. + _LT_TAGVAR(no_undefined_flag, $1)='${wl}-z,text' + _LT_TAGVAR(allow_undefined_flag, $1)='${wl}-z,nodefs' + _LT_TAGVAR(archive_cmds_need_lc, $1)=no + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R,$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=':' + _LT_TAGVAR(link_all_deplibs, $1)=yes + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-Bexport' + runpath_var='LD_RUN_PATH' + + if test "$GCC" = yes; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + else + _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + fi + ;; + + uts4*) + _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + *) + 
_LT_TAGVAR(ld_shlibs, $1)=no + ;; + esac + + if test x$host_vendor = xsni; then + case $host in + sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*) + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-Blargedynsym' + ;; + esac + fi + fi +]) +AC_MSG_RESULT([$_LT_TAGVAR(ld_shlibs, $1)]) +test "$_LT_TAGVAR(ld_shlibs, $1)" = no && can_build_shared=no + +_LT_TAGVAR(with_gnu_ld, $1)=$with_gnu_ld + +_LT_DECL([], [libext], [0], [Old archive suffix (normally "a")])dnl +_LT_DECL([], [shrext_cmds], [1], [Shared library suffix (normally ".so")])dnl +_LT_DECL([], [extract_expsyms_cmds], [2], + [The commands to extract the exported symbol list from a shared archive]) + +# +# Do we need to explicitly link libc? +# +case "x$_LT_TAGVAR(archive_cmds_need_lc, $1)" in +x|xyes) + # Assume -lc should be added + _LT_TAGVAR(archive_cmds_need_lc, $1)=yes + + if test "$enable_shared" = yes && test "$GCC" = yes; then + case $_LT_TAGVAR(archive_cmds, $1) in + *'~'*) + # FIXME: we may have to deal with multi-command sequences. + ;; + '$CC '*) + # Test whether the compiler implicitly links with -lc since on some + # systems, -lgcc has to come before -lc. If gcc already passes -lc + # to ld, don't add -lc before -lgcc. + AC_CACHE_CHECK([whether -lc should be explicitly linked in], + [lt_cv_]_LT_TAGVAR(archive_cmds_need_lc, $1), + [$RM conftest* + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + + if AC_TRY_EVAL(ac_compile) 2>conftest.err; then + soname=conftest + lib=conftest + libobjs=conftest.$ac_objext + deplibs= + wl=$_LT_TAGVAR(lt_prog_compiler_wl, $1) + pic_flag=$_LT_TAGVAR(lt_prog_compiler_pic, $1) + compiler_flags=-v + linker_flags=-v + verstring= + output_objdir=. 
+ libname=conftest + lt_save_allow_undefined_flag=$_LT_TAGVAR(allow_undefined_flag, $1) + _LT_TAGVAR(allow_undefined_flag, $1)= + if AC_TRY_EVAL(_LT_TAGVAR(archive_cmds, $1) 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) + then + lt_cv_[]_LT_TAGVAR(archive_cmds_need_lc, $1)=no + else + lt_cv_[]_LT_TAGVAR(archive_cmds_need_lc, $1)=yes + fi + _LT_TAGVAR(allow_undefined_flag, $1)=$lt_save_allow_undefined_flag + else + cat conftest.err 1>&5 + fi + $RM conftest* + ]) + _LT_TAGVAR(archive_cmds_need_lc, $1)=$lt_cv_[]_LT_TAGVAR(archive_cmds_need_lc, $1) + ;; + esac + fi + ;; +esac + +_LT_TAGDECL([build_libtool_need_lc], [archive_cmds_need_lc], [0], + [Whether or not to add -lc for building shared libraries]) +_LT_TAGDECL([allow_libtool_libs_with_static_runtimes], + [enable_shared_with_static_runtimes], [0], + [Whether or not to disallow shared libs when runtime libs are static]) +_LT_TAGDECL([], [export_dynamic_flag_spec], [1], + [Compiler flag to allow reflexive dlopens]) +_LT_TAGDECL([], [whole_archive_flag_spec], [1], + [Compiler flag to generate shared objects directly from archives]) +_LT_TAGDECL([], [compiler_needs_object], [1], + [Whether the compiler copes with passing no objects directly]) +_LT_TAGDECL([], [old_archive_from_new_cmds], [2], + [Create an old-style archive from a shared archive]) +_LT_TAGDECL([], [old_archive_from_expsyms_cmds], [2], + [Create a temporary old-style archive to link instead of a shared archive]) +_LT_TAGDECL([], [archive_cmds], [2], [Commands used to build a shared archive]) +_LT_TAGDECL([], [archive_expsym_cmds], [2]) +_LT_TAGDECL([], [module_cmds], [2], + [Commands used to build a loadable module if different from building + a shared archive.]) +_LT_TAGDECL([], [module_expsym_cmds], [2]) +_LT_TAGDECL([], [with_gnu_ld], [1], + [Whether we are building with GNU ld or not]) +_LT_TAGDECL([], [allow_undefined_flag], [1], + [Flag that allows shared libraries with undefined symbols to be built]) +_LT_TAGDECL([], [no_undefined_flag], [1], + 
[Flag that enforces no undefined symbols]) +_LT_TAGDECL([], [hardcode_libdir_flag_spec], [1], + [Flag to hardcode $libdir into a binary during linking. + This must work even if $libdir does not exist]) +_LT_TAGDECL([], [hardcode_libdir_separator], [1], + [Whether we need a single "-rpath" flag with a separated argument]) +_LT_TAGDECL([], [hardcode_direct], [0], + [Set to "yes" if using DIR/libNAME${shared_ext} during linking hardcodes + DIR into the resulting binary]) +_LT_TAGDECL([], [hardcode_direct_absolute], [0], + [Set to "yes" if using DIR/libNAME${shared_ext} during linking hardcodes + DIR into the resulting binary and the resulting library dependency is + "absolute", i.e impossible to change by setting ${shlibpath_var} if the + library is relocated]) +_LT_TAGDECL([], [hardcode_minus_L], [0], + [Set to "yes" if using the -LDIR flag during linking hardcodes DIR + into the resulting binary]) +_LT_TAGDECL([], [hardcode_shlibpath_var], [0], + [Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR + into the resulting binary]) +_LT_TAGDECL([], [hardcode_automatic], [0], + [Set to "yes" if building a shared library automatically hardcodes DIR + into the library and all subsequent libraries and executables linked + against it]) +_LT_TAGDECL([], [inherit_rpath], [0], + [Set to yes if linker adds runtime paths of dependent libraries + to runtime path list]) +_LT_TAGDECL([], [link_all_deplibs], [0], + [Whether libtool must link a program against all its dependency libraries]) +_LT_TAGDECL([], [always_export_symbols], [0], + [Set to "yes" if exported symbols are required]) +_LT_TAGDECL([], [export_symbols_cmds], [2], + [The commands to list exported symbols]) +_LT_TAGDECL([], [exclude_expsyms], [1], + [Symbols that should not be listed in the preloaded symbols]) +_LT_TAGDECL([], [include_expsyms], [1], + [Symbols that must always be exported]) +_LT_TAGDECL([], [prelink_cmds], [2], + [Commands necessary for linking programs (against libraries) with 
templates]) +_LT_TAGDECL([], [postlink_cmds], [2], + [Commands necessary for finishing linking programs]) +_LT_TAGDECL([], [file_list_spec], [1], + [Specify filename containing input files]) +dnl FIXME: Not yet implemented +dnl _LT_TAGDECL([], [thread_safe_flag_spec], [1], +dnl [Compiler flag to generate thread safe objects]) +])# _LT_LINKER_SHLIBS + + +# _LT_LANG_C_CONFIG([TAG]) +# ------------------------ +# Ensure that the configuration variables for a C compiler are suitably +# defined. These variables are subsequently used by _LT_CONFIG to write +# the compiler configuration to `libtool'. +m4_defun([_LT_LANG_C_CONFIG], +[m4_require([_LT_DECL_EGREP])dnl +lt_save_CC="$CC" +AC_LANG_PUSH(C) + +# Source file extension for C test sources. +ac_ext=c + +# Object file extension for compiled C test sources. +objext=o +_LT_TAGVAR(objext, $1)=$objext + +# Code to be used in simple compile tests +lt_simple_compile_test_code="int some_variable = 0;" + +# Code to be used in simple link tests +lt_simple_link_test_code='int main(){return(0);}' + +_LT_TAG_COMPILER +# Save the default compiler, since it gets overwritten when the other +# tags are being tested, and _LT_TAGVAR(compiler, []) is a NOP. +compiler_DEFAULT=$CC + +# save warnings/boilerplate of simple test code +_LT_COMPILER_BOILERPLATE +_LT_LINKER_BOILERPLATE + +## CAVEAT EMPTOR: +## There is no encapsulation within the following macros, do not change +## the running order or otherwise move them around unless you know exactly +## what you are doing... 
+if test -n "$compiler"; then + _LT_COMPILER_NO_RTTI($1) + _LT_COMPILER_PIC($1) + _LT_COMPILER_C_O($1) + _LT_COMPILER_FILE_LOCKS($1) + _LT_LINKER_SHLIBS($1) + _LT_SYS_DYNAMIC_LINKER($1) + _LT_LINKER_HARDCODE_LIBPATH($1) + LT_SYS_DLOPEN_SELF + _LT_CMD_STRIPLIB + + # Report which library types will actually be built + AC_MSG_CHECKING([if libtool supports shared libraries]) + AC_MSG_RESULT([$can_build_shared]) + + AC_MSG_CHECKING([whether to build shared libraries]) + test "$can_build_shared" = "no" && enable_shared=no + + # On AIX, shared libraries and static libraries use the same namespace, and + # are all built from PIC. + case $host_os in + aix3*) + test "$enable_shared" = yes && enable_static=no + if test -n "$RANLIB"; then + archive_cmds="$archive_cmds~\$RANLIB \$lib" + postinstall_cmds='$RANLIB $lib' + fi + ;; + + aix[[4-9]]*) + if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then + test "$enable_shared" = yes && enable_static=no + fi + ;; + esac + AC_MSG_RESULT([$enable_shared]) + + AC_MSG_CHECKING([whether to build static libraries]) + # Make sure either enable_shared or enable_static is yes. + test "$enable_shared" = yes || enable_static=yes + AC_MSG_RESULT([$enable_static]) + + _LT_CONFIG($1) +fi +AC_LANG_POP +CC="$lt_save_CC" +])# _LT_LANG_C_CONFIG + + +# _LT_LANG_CXX_CONFIG([TAG]) +# -------------------------- +# Ensure that the configuration variables for a C++ compiler are suitably +# defined. These variables are subsequently used by _LT_CONFIG to write +# the compiler configuration to `libtool'. 
+m4_defun([_LT_LANG_CXX_CONFIG], +[m4_require([_LT_FILEUTILS_DEFAULTS])dnl +m4_require([_LT_DECL_EGREP])dnl +m4_require([_LT_PATH_MANIFEST_TOOL])dnl +if test -n "$CXX" && ( test "X$CXX" != "Xno" && + ( (test "X$CXX" = "Xg++" && `g++ -v >/dev/null 2>&1` ) || + (test "X$CXX" != "Xg++"))) ; then + AC_PROG_CXXCPP +else + _lt_caught_CXX_error=yes +fi + +AC_LANG_PUSH(C++) +_LT_TAGVAR(archive_cmds_need_lc, $1)=no +_LT_TAGVAR(allow_undefined_flag, $1)= +_LT_TAGVAR(always_export_symbols, $1)=no +_LT_TAGVAR(archive_expsym_cmds, $1)= +_LT_TAGVAR(compiler_needs_object, $1)=no +_LT_TAGVAR(export_dynamic_flag_spec, $1)= +_LT_TAGVAR(hardcode_direct, $1)=no +_LT_TAGVAR(hardcode_direct_absolute, $1)=no +_LT_TAGVAR(hardcode_libdir_flag_spec, $1)= +_LT_TAGVAR(hardcode_libdir_separator, $1)= +_LT_TAGVAR(hardcode_minus_L, $1)=no +_LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported +_LT_TAGVAR(hardcode_automatic, $1)=no +_LT_TAGVAR(inherit_rpath, $1)=no +_LT_TAGVAR(module_cmds, $1)= +_LT_TAGVAR(module_expsym_cmds, $1)= +_LT_TAGVAR(link_all_deplibs, $1)=unknown +_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds +_LT_TAGVAR(reload_flag, $1)=$reload_flag +_LT_TAGVAR(reload_cmds, $1)=$reload_cmds +_LT_TAGVAR(no_undefined_flag, $1)= +_LT_TAGVAR(whole_archive_flag_spec, $1)= +_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no + +# Source file extension for C++ test sources. +ac_ext=cpp + +# Object file extension for compiled C++ test sources. +objext=o +_LT_TAGVAR(objext, $1)=$objext + +# No sense in running all these tests if we already determined that +# the CXX compiler isn't working. Some variables (like enable_shared) +# are currently assumed to apply to all compilers on this platform, +# and will be corrupted by setting them based on a non-working compiler. 
+if test "$_lt_caught_CXX_error" != yes; then + # Code to be used in simple compile tests + lt_simple_compile_test_code="int some_variable = 0;" + + # Code to be used in simple link tests + lt_simple_link_test_code='int main(int, char *[[]]) { return(0); }' + + # ltmain only uses $CC for tagged configurations so make sure $CC is set. + _LT_TAG_COMPILER + + # save warnings/boilerplate of simple test code + _LT_COMPILER_BOILERPLATE + _LT_LINKER_BOILERPLATE + + # Allow CC to be a program name with arguments. + lt_save_CC=$CC + lt_save_CFLAGS=$CFLAGS + lt_save_LD=$LD + lt_save_GCC=$GCC + GCC=$GXX + lt_save_with_gnu_ld=$with_gnu_ld + lt_save_path_LD=$lt_cv_path_LD + if test -n "${lt_cv_prog_gnu_ldcxx+set}"; then + lt_cv_prog_gnu_ld=$lt_cv_prog_gnu_ldcxx + else + $as_unset lt_cv_prog_gnu_ld + fi + if test -n "${lt_cv_path_LDCXX+set}"; then + lt_cv_path_LD=$lt_cv_path_LDCXX + else + $as_unset lt_cv_path_LD + fi + test -z "${LDCXX+set}" || LD=$LDCXX + CC=${CXX-"c++"} + CFLAGS=$CXXFLAGS + compiler=$CC + _LT_TAGVAR(compiler, $1)=$CC + _LT_CC_BASENAME([$compiler]) + + if test -n "$compiler"; then + # We don't want -fno-exception when compiling C++ code, so set the + # no_builtin_flag separately + if test "$GXX" = yes; then + _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -fno-builtin' + else + _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)= + fi + + if test "$GXX" = yes; then + # Set up default GNU C++ configuration + + LT_PATH_LD + + # Check if GNU C++ uses GNU ld as the underlying linker, since the + # archiving commands below assume that GNU ld is being used. 
+ if test "$with_gnu_ld" = yes; then + _LT_TAGVAR(archive_cmds, $1)='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' + + # If archive_cmds runs LD, not CC, wlarc should be empty + # XXX I think wlarc can be eliminated in ltcf-cxx, but I need to + # investigate it a little bit more. (MM) + wlarc='${wl}' + + # ancient GNU ld didn't support --whole-archive et. al. + if eval "`$CC -print-prog-name=ld` --help 2>&1" | + $GREP 'no-whole-archive' > /dev/null; then + _LT_TAGVAR(whole_archive_flag_spec, $1)="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' + else + _LT_TAGVAR(whole_archive_flag_spec, $1)= + fi + else + with_gnu_ld=no + wlarc= + + # A generic and very simple default shared library creation + # command for GNU C++ for the case where it uses the native + # linker, instead of GNU ld. If possible, this setting should + # overridden to take advantage of the native linker features on + # the platform it is being used on. + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' + fi + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. 
+ output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' + + else + GXX=no + with_gnu_ld=no + wlarc= + fi + + # PORTME: fill in a description of your system's C++ link characteristics + AC_MSG_CHECKING([whether the $compiler linker ($LD) supports shared libraries]) + _LT_TAGVAR(ld_shlibs, $1)=yes + case $host_os in + aix3*) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + aix[[4-9]]*) + if test "$host_cpu" = ia64; then + # On IA64, the linker does run time linking by default, so we don't + # have to do anything special. + aix_use_runtimelinking=no + exp_sym_flag='-Bexport' + no_entry_flag="" + else + aix_use_runtimelinking=no + + # Test if we are trying to use run time linking or normal + # AIX style linking. If -brtl is somewhere in LDFLAGS, we + # need to do runtime linking. + case $host_os in aix4.[[23]]|aix4.[[23]].*|aix[[5-9]]*) + for ld_flag in $LDFLAGS; do + case $ld_flag in + *-brtl*) + aix_use_runtimelinking=yes + break + ;; + esac + done + ;; + esac + + exp_sym_flag='-bexport' + no_entry_flag='-bnoentry' + fi + + # When large executables or shared objects are built, AIX ld can + # have problems creating the table of contents. If linking a library + # or program results in "error TOC overflow" add -mminimal-toc to + # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not + # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. 
+ + _LT_TAGVAR(archive_cmds, $1)='' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_direct_absolute, $1)=yes + _LT_TAGVAR(hardcode_libdir_separator, $1)=':' + _LT_TAGVAR(link_all_deplibs, $1)=yes + _LT_TAGVAR(file_list_spec, $1)='${wl}-f,' + + if test "$GXX" = yes; then + case $host_os in aix4.[[012]]|aix4.[[012]].*) + # We only want to do this on AIX 4.2 and lower, the check + # below for broken collect2 doesn't work under 4.3+ + collect2name=`${CC} -print-prog-name=collect2` + if test -f "$collect2name" && + strings "$collect2name" | $GREP resolve_lib_name >/dev/null + then + # We have reworked collect2 + : + else + # We have old collect2 + _LT_TAGVAR(hardcode_direct, $1)=unsupported + # It fails to find uninstalled libraries when the uninstalled + # path is not listed in the libpath. Setting hardcode_minus_L + # to unsupported forces relinking + _LT_TAGVAR(hardcode_minus_L, $1)=yes + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)= + fi + esac + shared_flag='-shared' + if test "$aix_use_runtimelinking" = yes; then + shared_flag="$shared_flag "'${wl}-G' + fi + else + # not using gcc + if test "$host_cpu" = ia64; then + # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release + # chokes on -Wl,-G. The following line is correct: + shared_flag='-G' + else + if test "$aix_use_runtimelinking" = yes; then + shared_flag='${wl}-G' + else + shared_flag='${wl}-bM:SRE' + fi + fi + fi + + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-bexpall' + # It seems that -bexpall does not export symbols beginning with + # underscore (_), so it is better to generate a list of symbols to + # export. + _LT_TAGVAR(always_export_symbols, $1)=yes + if test "$aix_use_runtimelinking" = yes; then + # Warning - without using the other runtime loading flags (-brtl), + # -berok will link without error, but may produce a broken library. 
+ _LT_TAGVAR(allow_undefined_flag, $1)='-berok' + # Determine the default libpath from the value encoded in an empty + # executable. + _LT_SYS_MODULE_PATH_AIX([$1]) + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" + + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then func_echo_all "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" + else + if test "$host_cpu" = ia64; then + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R $libdir:/usr/lib:/lib' + _LT_TAGVAR(allow_undefined_flag, $1)="-z nodefs" + _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols" + else + # Determine the default libpath from the value encoded in an + # empty executable. + _LT_SYS_MODULE_PATH_AIX([$1]) + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" + # Warning - without using the other run time loading flags, + # -berok will link without error, but may produce a broken library. + _LT_TAGVAR(no_undefined_flag, $1)=' ${wl}-bernotok' + _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-berok' + if test "$with_gnu_ld" = yes; then + # We only use this code for GNU lds that support --whole-archive. + _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive$convenience ${wl}--no-whole-archive' + else + # Exported symbols can be pulled into shared objects from archives + _LT_TAGVAR(whole_archive_flag_spec, $1)='$convenience' + fi + _LT_TAGVAR(archive_cmds_need_lc, $1)=yes + # This is similar to how AIX traditionally builds its shared + # libraries. 
+ _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' + fi + fi + ;; + + beos*) + if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + _LT_TAGVAR(allow_undefined_flag, $1)=unsupported + # Joseph Beckenbach says some releases of gcc + # support --undefined. This deserves some investigation. FIXME + _LT_TAGVAR(archive_cmds, $1)='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + + chorus*) + case $cc_basename in + *) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + esac + ;; + + cygwin* | mingw* | pw32* | cegcc*) + case $GXX,$cc_basename in + ,cl* | no,cl*) + # Native MSVC + # hardcode_libdir_flag_spec is actually meaningless, as there is + # no search path for DLLs. + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' ' + _LT_TAGVAR(allow_undefined_flag, $1)=unsupported + _LT_TAGVAR(always_export_symbols, $1)=yes + _LT_TAGVAR(file_list_spec, $1)='@' + # Tell ltmain to make .lib files, not .a files. + libext=lib + # Tell ltmain to make .dll files, not .so files. + shrext_cmds=".dll" + # FIXME: Setting linknames here is a bad hack. 
+ _LT_TAGVAR(archive_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-dll~linknames=' + _LT_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then + $SED -n -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' -e '1\\\!p' < $export_symbols > $output_objdir/$soname.exp; + else + $SED -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' < $export_symbols > $output_objdir/$soname.exp; + fi~ + $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ + linknames=' + # The linker will not automatically build a static lib if we build a DLL. + # _LT_TAGVAR(old_archive_from_new_cmds, $1)='true' + _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes + # Don't use ranlib + _LT_TAGVAR(old_postinstall_cmds, $1)='chmod 644 $oldlib' + _LT_TAGVAR(postlink_cmds, $1)='lt_outputfile="@OUTPUT@"~ + lt_tool_outputfile="@TOOL_OUTPUT@"~ + case $lt_outputfile in + *.exe|*.EXE) ;; + *) + lt_outputfile="$lt_outputfile.exe" + lt_tool_outputfile="$lt_tool_outputfile.exe" + ;; + esac~ + func_to_tool_file "$lt_outputfile"~ + if test "$MANIFEST_TOOL" != ":" && test -f "$lt_outputfile.manifest"; then + $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; + $RM "$lt_outputfile.manifest"; + fi' + ;; + *) + # g++ + # _LT_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless, + # as there is no search path for DLLs. 
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-all-symbols' + _LT_TAGVAR(allow_undefined_flag, $1)=unsupported + _LT_TAGVAR(always_export_symbols, $1)=no + _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes + + if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + # If the export-symbols file already is a .def file (1st line + # is EXPORTS), use it as is; otherwise, prepend... + _LT_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then + cp $export_symbols $output_objdir/$soname.def; + else + echo EXPORTS > $output_objdir/$soname.def; + cat $export_symbols >> $output_objdir/$soname.def; + fi~ + $CC -shared -nostdlib $output_objdir/$soname.def $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + esac + ;; + darwin* | rhapsody*) + _LT_DARWIN_LINKER_FEATURES($1) + ;; + + dgux*) + case $cc_basename in + ec++*) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + ghcx*) + # Green Hills C++ Compiler + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + *) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + esac + ;; + + freebsd2.*) + # C++ shared libraries reported to be fairly broken before + # switch to ELF + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + + freebsd-elf*) + _LT_TAGVAR(archive_cmds_need_lc, $1)=no + ;; + + freebsd* | dragonfly*) + # FreeBSD 3 and later use GNU C++ and GNU ld with standard ELF + # conventions + _LT_TAGVAR(ld_shlibs, $1)=yes + ;; + + gnu*) + ;; + + haiku*) + _LT_TAGVAR(archive_cmds, 
$1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(link_all_deplibs, $1)=yes + ;; + + hpux9*) + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_minus_L, $1)=yes # Not in the search PATH, + # but as the default + # location of the library. + + case $cc_basename in + CC*) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + aCC*) + _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -b ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. 
+ output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $EGREP "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' + ;; + *) + if test "$GXX" = yes; then + _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -shared -nostdlib $pic_flag ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + else + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + esac + ;; + + hpux10*|hpux11*) + if test $with_gnu_ld = no; then + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + + case $host_cpu in + hppa*64*|ia64*) + ;; + *) + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + ;; + esac + fi + case $host_cpu in + hppa*64*|ia64*) + _LT_TAGVAR(hardcode_direct, $1)=no + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + *) + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_direct_absolute, $1)=yes + _LT_TAGVAR(hardcode_minus_L, $1)=yes # Not in the search PATH, + # but as the default + # location of the library. 
+ ;; + esac + + case $cc_basename in + CC*) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + aCC*) + case $host_cpu in + hppa*64*) + _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + ia64*) + _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + *) + _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + esac + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. 
+ output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $GREP "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' + ;; + *) + if test "$GXX" = yes; then + if test $with_gnu_ld = no; then + case $host_cpu in + hppa*64*) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + ia64*) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $pic_flag ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + *) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + esac + fi + else + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + esac + ;; + + interix[[3-9]]*) + _LT_TAGVAR(hardcode_direct, $1)=no + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. + # Instead, shared libraries are loaded at an image base (0x10000000 by + # default) and relocated if they conflict, which is a slow very memory + # consuming and fragmenting process. To avoid this, we pick a random, + # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link + # time. Moving up from 0x10000000 also allows more sbrk(2) space. 
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + ;; + irix5* | irix6*) + case $cc_basename in + CC*) + # SGI C++ + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -all -multigot $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' + + # Archives containing C++ object files must be created using + # "CC -ar", where "CC" is the IRIX C++ compiler. This is + # necessary to make sure instantiated templates are included + # in the archive. 
+ _LT_TAGVAR(old_archive_cmds, $1)='$CC -ar -WR,-u -o $oldlib $oldobjs' + ;; + *) + if test "$GXX" = yes; then + if test "$with_gnu_ld" = no; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + else + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` -o $lib' + fi + fi + _LT_TAGVAR(link_all_deplibs, $1)=yes + ;; + esac + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + _LT_TAGVAR(inherit_rpath, $1)=yes + ;; + + linux* | k*bsd*-gnu | kopensolaris*-gnu) + case $cc_basename in + KCC*) + # Kuck and Associates, Inc. (KAI) C++ Compiler + + # KCC will only create a shared library if the output file + # ends with ".so" (or ".sl" for HP-UX), so rename the library + # to its proper name (with version) after linking. 
+ _LT_TAGVAR(archive_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib ${wl}-retain-symbols-file,$export_symbols; mv \$templib $lib' + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. + output_verbose_link_cmd='templist=`$CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 | $GREP "ld"`; rm -f libconftest$shared_ext; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' + + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' + + # Archives containing C++ object files must be created using + # "CC -Bstatic", where "CC" is the KAI C++ compiler. + _LT_TAGVAR(old_archive_cmds, $1)='$CC -Bstatic -o $oldlib $oldobjs' + ;; + icpc* | ecpc* ) + # Intel C++ + with_gnu_ld=yes + # version 8.0 and above of icpc choke on multiply defined symbols + # if we add $predep_objects and $postdep_objects, however 7.1 and + # earlier do not add the objects themselves. 
+ case `$CC -V 2>&1` in + *"Version 7."*) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + ;; + *) # Version 8.0 or newer + tmp_idyn= + case $host_cpu in + ia64*) tmp_idyn=' -i_dynamic';; + esac + _LT_TAGVAR(archive_cmds, $1)='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + ;; + esac + _LT_TAGVAR(archive_cmds_need_lc, $1)=no + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' + _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive$convenience ${wl}--no-whole-archive' + ;; + pgCC* | pgcpp*) + # Portland Group C++ compiler + case `$CC -V` in + *pgCC\ [[1-5]].* | *pgcpp\ [[1-5]].*) + _LT_TAGVAR(prelink_cmds, $1)='tpldir=Template.dir~ + rm -rf $tpldir~ + $CC --prelink_objects --instantiation_dir $tpldir $objs $libobjs $compile_deplibs~ + compile_command="$compile_command `find $tpldir -name \*.o | sort | $NL2SP`"' + _LT_TAGVAR(old_archive_cmds, $1)='tpldir=Template.dir~ + rm -rf $tpldir~ + $CC --prelink_objects --instantiation_dir $tpldir $oldobjs$old_deplibs~ + $AR $AR_FLAGS $oldlib$oldobjs$old_deplibs `find $tpldir -name \*.o | sort | $NL2SP`~ + $RANLIB $oldlib' + _LT_TAGVAR(archive_cmds, $1)='tpldir=Template.dir~ + rm -rf $tpldir~ + $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ + $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects 
$compiler_flags ${wl}-soname ${wl}$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='tpldir=Template.dir~ + rm -rf $tpldir~ + $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ + $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib' + ;; + *) # Version 6 and above use weak symbols + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib' + ;; + esac + + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}--rpath ${wl}$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' + _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' + ;; + cxx*) + # Compaq C++ + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib ${wl}-retain-symbols-file $wl$export_symbols' + + runpath_var=LD_RUN_PATH + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. 
+ # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. + output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld .*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "X$list" | $Xsed' + ;; + xl* | mpixl* | bgxl*) + # IBM XL 8.0 on PPC, with GNU ld + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' + _LT_TAGVAR(archive_cmds, $1)='$CC -qmkshrobj $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + if test "x$supports_anon_versioning" = xyes; then + _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + echo "local: *; };" >> $output_objdir/$libname.ver~ + $CC -qmkshrobj $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' + fi + ;; + *) + case `$CC -V 2>&1 | sed 5q` in + *Sun\ C*) + # Sun C++ 5.9 + _LT_TAGVAR(no_undefined_flag, $1)=' -zdefs' + _LT_TAGVAR(archive_cmds, $1)='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-retain-symbols-file ${wl}$export_symbols' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; 
func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' + _LT_TAGVAR(compiler_needs_object, $1)=yes + + # Not sure whether something based on + # $CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 + # would be better. + output_verbose_link_cmd='func_echo_all' + + # Archives containing C++ object files must be created using + # "CC -xar", where "CC" is the Sun C++ compiler. This is + # necessary to make sure instantiated templates are included + # in the archive. + _LT_TAGVAR(old_archive_cmds, $1)='$CC -xar -o $oldlib $oldobjs' + ;; + esac + ;; + esac + ;; + + lynxos*) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + + m88k*) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + + mvs*) + case $cc_basename in + cxx*) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + *) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + esac + ;; + + netbsd*) + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $predep_objects $libobjs $deplibs $postdep_objects $linker_flags' + wlarc= + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + fi + # Workaround some broken pre-1.5 toolchains + output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP conftest.$objext | $SED -e "s:-lgcc -lc -lgcc::"' + ;; + + *nto* | *qnx*) + _LT_TAGVAR(ld_shlibs, $1)=yes + ;; + + openbsd2*) + # C++ shared libraries are fairly broken + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + + openbsd*) + if test -f /usr/libexec/ld.so; then + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_TAGVAR(hardcode_direct_absolute, $1)=yes + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' + 
_LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-retain-symbols-file,$export_symbols -o $lib' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + _LT_TAGVAR(whole_archive_flag_spec, $1)="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' + fi + output_verbose_link_cmd=func_echo_all + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + + osf3* | osf4* | osf5*) + case $cc_basename in + KCC*) + # Kuck and Associates, Inc. (KAI) C++ Compiler + + # KCC will only create a shared library if the output file + # ends with ".so" (or ".sl" for HP-UX), so rename the library + # to its proper name (with version) after linking. + _LT_TAGVAR(archive_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo "$lib" | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' + + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + + # Archives containing C++ object files must be created using + # the KAI C++ compiler. 
+ case $host in + osf3*) _LT_TAGVAR(old_archive_cmds, $1)='$CC -Bstatic -o $oldlib $oldobjs' ;; + *) _LT_TAGVAR(old_archive_cmds, $1)='$CC -o $oldlib $oldobjs' ;; + esac + ;; + RCC*) + # Rational C++ 2.4.1 + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + cxx*) + case $host in + osf3*) + _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' + _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $soname `test -n "$verstring" && func_echo_all "${wl}-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + ;; + *) + _LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' + _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done~ + echo "-hidden">> $lib.exp~ + $CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname ${wl}-input ${wl}$lib.exp `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib~ + $RM $lib.exp' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir' + ;; + esac + + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. 
+ # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. + output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld" | $GREP -v "ld:"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' + ;; + *) + if test "$GXX" = yes && test "$with_gnu_ld" = no; then + _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' + case $host in + osf3*) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + ;; + *) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + ;; + esac + + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. 
+ output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' + + else + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + esac + ;; + + psos*) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + + sunos4*) + case $cc_basename in + CC*) + # Sun C++ 4.x + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + lcc*) + # Lucid + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + *) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + esac + ;; + + solaris*) + case $cc_basename in + CC* | sunCC*) + # Sun C++ 4.2, 5.x and Centerline C++ + _LT_TAGVAR(archive_cmds_need_lc,$1)=yes + _LT_TAGVAR(no_undefined_flag, $1)=' -zdefs' + _LT_TAGVAR(archive_cmds, $1)='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -G${allow_undefined_flag} ${wl}-M ${wl}$lib.exp -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' + + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + case $host_os in + solaris2.[[0-5]] | solaris2.[[0-5]].*) ;; + *) + # The compiler driver will combine and reorder linker options, + # but understands `-z linker_flag'. + # Supported since Solaris 2.6 (maybe 2.5.1?) + _LT_TAGVAR(whole_archive_flag_spec, $1)='-z allextract$convenience -z defaultextract' + ;; + esac + _LT_TAGVAR(link_all_deplibs, $1)=yes + + output_verbose_link_cmd='func_echo_all' + + # Archives containing C++ object files must be created using + # "CC -xar", where "CC" is the Sun C++ compiler. 
This is + # necessary to make sure instantiated templates are included + # in the archive. + _LT_TAGVAR(old_archive_cmds, $1)='$CC -xar -o $oldlib $oldobjs' + ;; + gcx*) + # Green Hills C++ Compiler + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' + + # The C++ compiler must be used to create the archive. + _LT_TAGVAR(old_archive_cmds, $1)='$CC $LDFLAGS -archive -o $oldlib $oldobjs' + ;; + *) + # GNU C++ compiler with Solaris linker + if test "$GXX" = yes && test "$with_gnu_ld" = no; then + _LT_TAGVAR(no_undefined_flag, $1)=' ${wl}-z ${wl}defs' + if $CC --version | $GREP -v '^2\.7' > /dev/null; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -shared $pic_flag -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' + else + # g++ 2.7 appears to require `-G' NOT `-shared' on this + # platform. 
+ _LT_TAGVAR(archive_cmds, $1)='$CC -G -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -G -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + output_verbose_link_cmd='$CC -G $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' + fi + + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R $wl$libdir' + case $host_os in + solaris2.[[0-5]] | solaris2.[[0-5]].*) ;; + *) + _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}-z ${wl}allextract$convenience ${wl}-z ${wl}defaultextract' + ;; + esac + fi + ;; + esac + ;; + + sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[[01]].[[10]]* | unixware7* | sco3.2v5.0.[[024]]*) + _LT_TAGVAR(no_undefined_flag, $1)='${wl}-z,text' + _LT_TAGVAR(archive_cmds_need_lc, $1)=no + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + runpath_var='LD_RUN_PATH' + + case $cc_basename in + CC*) + _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + ;; + + sysv5* | sco3.2v5* | sco5v6*) + # Note: We can NOT use -z defs as we might desire, because we do not + # link with -lc, and that would cause any symbols used from libc to + # always be unresolved, 
which means just about no library would + # ever link correctly. If we're not using GNU ld we use -z text + # though, which does catch some bad symbols but isn't as heavy-handed + # as -z defs. + _LT_TAGVAR(no_undefined_flag, $1)='${wl}-z,text' + _LT_TAGVAR(allow_undefined_flag, $1)='${wl}-z,nodefs' + _LT_TAGVAR(archive_cmds_need_lc, $1)=no + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R,$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=':' + _LT_TAGVAR(link_all_deplibs, $1)=yes + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-Bexport' + runpath_var='LD_RUN_PATH' + + case $cc_basename in + CC*) + _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(old_archive_cmds, $1)='$CC -Tprelink_objects $oldobjs~ + '"$_LT_TAGVAR(old_archive_cmds, $1)" + _LT_TAGVAR(reload_cmds, $1)='$CC -Tprelink_objects $reload_objs~ + '"$_LT_TAGVAR(reload_cmds, $1)" + ;; + *) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + ;; + + tandem*) + case $cc_basename in + NCC*) + # NonStop-UX NCC 3.20 + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + *) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + esac + ;; + + vxworks*) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + + *) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + esac + + AC_MSG_RESULT([$_LT_TAGVAR(ld_shlibs, $1)]) + test "$_LT_TAGVAR(ld_shlibs, $1)" = no && can_build_shared=no + + _LT_TAGVAR(GCC, $1)="$GXX" + _LT_TAGVAR(LD, $1)="$LD" + + ## CAVEAT EMPTOR: + ## 
There is no encapsulation within the following macros, do not change + ## the running order or otherwise move them around unless you know exactly + ## what you are doing... + _LT_SYS_HIDDEN_LIBDEPS($1) + _LT_COMPILER_PIC($1) + _LT_COMPILER_C_O($1) + _LT_COMPILER_FILE_LOCKS($1) + _LT_LINKER_SHLIBS($1) + _LT_SYS_DYNAMIC_LINKER($1) + _LT_LINKER_HARDCODE_LIBPATH($1) + + _LT_CONFIG($1) + fi # test -n "$compiler" + + CC=$lt_save_CC + CFLAGS=$lt_save_CFLAGS + LDCXX=$LD + LD=$lt_save_LD + GCC=$lt_save_GCC + with_gnu_ld=$lt_save_with_gnu_ld + lt_cv_path_LDCXX=$lt_cv_path_LD + lt_cv_path_LD=$lt_save_path_LD + lt_cv_prog_gnu_ldcxx=$lt_cv_prog_gnu_ld + lt_cv_prog_gnu_ld=$lt_save_with_gnu_ld +fi # test "$_lt_caught_CXX_error" != yes + +AC_LANG_POP +])# _LT_LANG_CXX_CONFIG + + +# _LT_FUNC_STRIPNAME_CNF +# ---------------------- +# func_stripname_cnf prefix suffix name +# strip PREFIX and SUFFIX off of NAME. +# PREFIX and SUFFIX must not contain globbing or regex special +# characters, hashes, percent signs, but SUFFIX may contain a leading +# dot (in which case that matches only a dot). +# +# This function is identical to the (non-XSI) version of func_stripname, +# except this one can be used by m4 code that may be executed by configure, +# rather than the libtool script. +m4_defun([_LT_FUNC_STRIPNAME_CNF],[dnl +AC_REQUIRE([_LT_DECL_SED]) +AC_REQUIRE([_LT_PROG_ECHO_BACKSLASH]) +func_stripname_cnf () +{ + case ${2} in + .*) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%\\\\${2}\$%%"`;; + *) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%${2}\$%%"`;; + esac +} # func_stripname_cnf +])# _LT_FUNC_STRIPNAME_CNF + +# _LT_SYS_HIDDEN_LIBDEPS([TAGNAME]) +# --------------------------------- +# Figure out "hidden" library dependencies from verbose +# compiler output when linking a shared library. +# Parse the compiler output and extract the necessary +# objects, libraries and library flags. 
+m4_defun([_LT_SYS_HIDDEN_LIBDEPS],
+[m4_require([_LT_FILEUTILS_DEFAULTS])dnl
+AC_REQUIRE([_LT_FUNC_STRIPNAME_CNF])dnl
+# Dependencies to place before and after the object being linked:
+_LT_TAGVAR(predep_objects, $1)=
+_LT_TAGVAR(postdep_objects, $1)=
+_LT_TAGVAR(predeps, $1)=
+_LT_TAGVAR(postdeps, $1)=
+_LT_TAGVAR(compiler_lib_search_path, $1)=
+
+dnl we can't use the lt_simple_compile_test_code here,
+dnl because it contains code intended for an executable,
+dnl not a library.  It's possible we should let each
+dnl tag define a new lt_????_link_test_code variable,
+dnl but it's only used here...
+m4_if([$1], [], [cat > conftest.$ac_ext <<_LT_EOF
+int a;
+void foo (void) { a = 0; }
+_LT_EOF
+], [$1], [CXX], [cat > conftest.$ac_ext <<_LT_EOF
+class Foo
+{
+public:
+  Foo (void) { a = 0; }
+private:
+  int a;
+};
+_LT_EOF
+], [$1], [F77], [cat > conftest.$ac_ext <<_LT_EOF
+      subroutine foo
+      implicit none
+      integer*4 a
+      a=0
+      return
+      end
+_LT_EOF
+], [$1], [FC], [cat > conftest.$ac_ext <<_LT_EOF
+      subroutine foo
+      implicit none
+      integer a
+      a=0
+      return
+      end
+_LT_EOF
+], [$1], [GCJ], [cat > conftest.$ac_ext <<_LT_EOF
+public class foo {
+  private int a;
+  public void bar (void) {
+    a = 0;
+  }
+};
+_LT_EOF
+], [$1], [GO], [cat > conftest.$ac_ext <<_LT_EOF
+package foo
+func foo() {
+}
+_LT_EOF
+])
+
+_lt_libdeps_save_CFLAGS=$CFLAGS
+case "$CC $CFLAGS " in #(
+*\ -flto*\ *) CFLAGS="$CFLAGS -fno-lto" ;;
+*\ -fwhopr*\ *) CFLAGS="$CFLAGS -fno-whopr" ;;
+*\ -fuse-linker-plugin*\ *) CFLAGS="$CFLAGS -fno-use-linker-plugin" ;;
+esac
+
+dnl Parse the compiler output and extract the necessary
+dnl objects, libraries and library flags.
+if AC_TRY_EVAL(ac_compile); then
+  # Parse the compiler output and extract the necessary
+  # objects, libraries and library flags.
+
+  # Sentinel used to keep track of whether or not we are before
+  # the conftest object file.
+  pre_test_object_deps_done=no
+
+  for p in `eval "$output_verbose_link_cmd"`; do
+    case ${prev}${p} in
+
+    -L* | -R* | -l*)
+       # Some compilers place space between "-{L,R}" and the path.
+       # Remove the space.
+       if test $p = "-L" ||
+          test $p = "-R"; then
+         prev=$p
+         continue
+       fi
+
+       # Expand the sysroot to ease extracting the directories later.
+       if test -z "$prev"; then
+         case $p in
+         -L*) func_stripname_cnf '-L' '' "$p"; prev=-L; p=$func_stripname_result ;;
+         -R*) func_stripname_cnf '-R' '' "$p"; prev=-R; p=$func_stripname_result ;;
+         -l*) func_stripname_cnf '-l' '' "$p"; prev=-l; p=$func_stripname_result ;;
+         esac
+       fi
+       case $p in
+       =*) func_stripname_cnf '=' '' "$p"; p=$lt_sysroot$func_stripname_result ;;
+       esac
+       if test "$pre_test_object_deps_done" = no; then
+         case ${prev} in
+         -L | -R)
+           # Internal compiler library paths should come after those
+           # provided the user.  The postdeps already come after the
+           # user supplied libs so there is no need to process them.
+           if test -z "$_LT_TAGVAR(compiler_lib_search_path, $1)"; then
+             _LT_TAGVAR(compiler_lib_search_path, $1)="${prev}${p}"
+           else
+             _LT_TAGVAR(compiler_lib_search_path, $1)="${_LT_TAGVAR(compiler_lib_search_path, $1)} ${prev}${p}"
+           fi
+           ;;
+         # The "-l" case would never come before the object being
+         # linked, so don't bother handling this case.
+         esac
+       else
+         if test -z "$_LT_TAGVAR(postdeps, $1)"; then
+           _LT_TAGVAR(postdeps, $1)="${prev}${p}"
+         else
+           _LT_TAGVAR(postdeps, $1)="${_LT_TAGVAR(postdeps, $1)} ${prev}${p}"
+         fi
+       fi
+       prev=
+       ;;
+
+    *.lto.$objext) ;; # Ignore GCC LTO objects
+    *.$objext)
+       # This assumes that the test object file only shows up
+       # once in the compiler output.
+       if test "$p" = "conftest.$objext"; then
+         pre_test_object_deps_done=yes
+         continue
+       fi
+
+       if test "$pre_test_object_deps_done" = no; then
+         if test -z "$_LT_TAGVAR(predep_objects, $1)"; then
+           _LT_TAGVAR(predep_objects, $1)="$p"
+         else
+           _LT_TAGVAR(predep_objects, $1)="$_LT_TAGVAR(predep_objects, $1) $p"
+         fi
+       else
+         if test -z "$_LT_TAGVAR(postdep_objects, $1)"; then
+           _LT_TAGVAR(postdep_objects, $1)="$p"
+         else
+           _LT_TAGVAR(postdep_objects, $1)="$_LT_TAGVAR(postdep_objects, $1) $p"
+         fi
+       fi
+       ;;
+
+    *) ;; # Ignore the rest.
+
+    esac
+  done
+
+  # Clean up.
+  rm -f a.out a.exe
+else
+  echo "libtool.m4: error: problem compiling $1 test program"
+fi
+
+# Fix: upstream typo read "confest.$objext", so the conftest object file
+# compiled above was never deleted and was left behind in the build tree.
+$RM -f conftest.$objext
+CFLAGS=$_lt_libdeps_save_CFLAGS
+
+# PORTME: override above test on systems where it is broken
+m4_if([$1], [CXX],
+[case $host_os in
+interix[[3-9]]*)
+  # Interix 3.5 installs completely hosed .la files for C++, so rather than
+  # hack all around it, let's just trust "g++" to DTRT.
+  _LT_TAGVAR(predep_objects,$1)=
+  _LT_TAGVAR(postdep_objects,$1)=
+  _LT_TAGVAR(postdeps,$1)=
+  ;;
+
+linux*)
+  case `$CC -V 2>&1 | sed 5q` in
+  *Sun\ C*)
+    # Sun C++ 5.9
+
+    # The more standards-conforming stlport4 library is
+    # incompatible with the Cstd library.  Avoid specifying
+    # it if it's in CXXFLAGS.  Ignore libCrun as
+    # -library=stlport4 depends on it.
+    case " $CXX $CXXFLAGS " in
+    *" -library=stlport4 "*)
+      solaris_use_stlport4=yes
+      ;;
+    esac
+
+    if test "$solaris_use_stlport4" != yes; then
+      _LT_TAGVAR(postdeps,$1)='-library=Cstd -library=Crun'
+    fi
+    ;;
+  esac
+  ;;
+
+solaris*)
+  case $cc_basename in
+  CC* | sunCC*)
+    # The more standards-conforming stlport4 library is
+    # incompatible with the Cstd library.  Avoid specifying
+    # it if it's in CXXFLAGS.  Ignore libCrun as
+    # -library=stlport4 depends on it.
+    case " $CXX $CXXFLAGS " in
+    *" -library=stlport4 "*)
+      solaris_use_stlport4=yes
+      ;;
+    esac
+
+    # Adding this requires a known-good setup of shared libraries for
+    # Sun compiler versions before 5.6, else PIC objects from an old
+    # archive will be linked into the output, leading to subtle bugs.
+    if test "$solaris_use_stlport4" != yes; then
+      _LT_TAGVAR(postdeps,$1)='-library=Cstd -library=Crun'
+    fi
+    ;;
+  esac
+  ;;
+esac
+])
+
+case " $_LT_TAGVAR(postdeps, $1) " in
+*" -lc "*) _LT_TAGVAR(archive_cmds_need_lc, $1)=no ;;
+esac
+ _LT_TAGVAR(compiler_lib_search_dirs, $1)=
+if test -n "${_LT_TAGVAR(compiler_lib_search_path, $1)}"; then
+ _LT_TAGVAR(compiler_lib_search_dirs, $1)=`echo " ${_LT_TAGVAR(compiler_lib_search_path, $1)}" | ${SED} -e 's! -L! !g' -e 's!^ !!'`
+fi
+_LT_TAGDECL([], [compiler_lib_search_dirs], [1],
+    [The directories searched by this compiler when creating a shared library])
+_LT_TAGDECL([], [predep_objects], [1],
+    [Dependencies to place before and after the objects being linked to
+    create a shared library])
+_LT_TAGDECL([], [postdep_objects], [1])
+_LT_TAGDECL([], [predeps], [1])
+_LT_TAGDECL([], [postdeps], [1])
+_LT_TAGDECL([], [compiler_lib_search_path], [1],
+    [The library search path used internally by the compiler when linking
+    a shared library])
+])# _LT_SYS_HIDDEN_LIBDEPS
+
+
+# _LT_LANG_F77_CONFIG([TAG])
+# --------------------------
+# Ensure that the configuration variables for a Fortran 77 compiler are
+# suitably defined.  These variables are subsequently used by _LT_CONFIG
+# to write the compiler configuration to `libtool'.
+m4_defun([_LT_LANG_F77_CONFIG], +[AC_LANG_PUSH(Fortran 77) +if test -z "$F77" || test "X$F77" = "Xno"; then + _lt_disable_F77=yes +fi + +_LT_TAGVAR(archive_cmds_need_lc, $1)=no +_LT_TAGVAR(allow_undefined_flag, $1)= +_LT_TAGVAR(always_export_symbols, $1)=no +_LT_TAGVAR(archive_expsym_cmds, $1)= +_LT_TAGVAR(export_dynamic_flag_spec, $1)= +_LT_TAGVAR(hardcode_direct, $1)=no +_LT_TAGVAR(hardcode_direct_absolute, $1)=no +_LT_TAGVAR(hardcode_libdir_flag_spec, $1)= +_LT_TAGVAR(hardcode_libdir_separator, $1)= +_LT_TAGVAR(hardcode_minus_L, $1)=no +_LT_TAGVAR(hardcode_automatic, $1)=no +_LT_TAGVAR(inherit_rpath, $1)=no +_LT_TAGVAR(module_cmds, $1)= +_LT_TAGVAR(module_expsym_cmds, $1)= +_LT_TAGVAR(link_all_deplibs, $1)=unknown +_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds +_LT_TAGVAR(reload_flag, $1)=$reload_flag +_LT_TAGVAR(reload_cmds, $1)=$reload_cmds +_LT_TAGVAR(no_undefined_flag, $1)= +_LT_TAGVAR(whole_archive_flag_spec, $1)= +_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no + +# Source file extension for f77 test sources. +ac_ext=f + +# Object file extension for compiled f77 test sources. +objext=o +_LT_TAGVAR(objext, $1)=$objext + +# No sense in running all these tests if we already determined that +# the F77 compiler isn't working. Some variables (like enable_shared) +# are currently assumed to apply to all compilers on this platform, +# and will be corrupted by setting them based on a non-working compiler. +if test "$_lt_disable_F77" != yes; then + # Code to be used in simple compile tests + lt_simple_compile_test_code="\ + subroutine t + return + end +" + + # Code to be used in simple link tests + lt_simple_link_test_code="\ + program t + end +" + + # ltmain only uses $CC for tagged configurations so make sure $CC is set. + _LT_TAG_COMPILER + + # save warnings/boilerplate of simple test code + _LT_COMPILER_BOILERPLATE + _LT_LINKER_BOILERPLATE + + # Allow CC to be a program name with arguments. 
+ lt_save_CC="$CC" + lt_save_GCC=$GCC + lt_save_CFLAGS=$CFLAGS + CC=${F77-"f77"} + CFLAGS=$FFLAGS + compiler=$CC + _LT_TAGVAR(compiler, $1)=$CC + _LT_CC_BASENAME([$compiler]) + GCC=$G77 + if test -n "$compiler"; then + AC_MSG_CHECKING([if libtool supports shared libraries]) + AC_MSG_RESULT([$can_build_shared]) + + AC_MSG_CHECKING([whether to build shared libraries]) + test "$can_build_shared" = "no" && enable_shared=no + + # On AIX, shared libraries and static libraries use the same namespace, and + # are all built from PIC. + case $host_os in + aix3*) + test "$enable_shared" = yes && enable_static=no + if test -n "$RANLIB"; then + archive_cmds="$archive_cmds~\$RANLIB \$lib" + postinstall_cmds='$RANLIB $lib' + fi + ;; + aix[[4-9]]*) + if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then + test "$enable_shared" = yes && enable_static=no + fi + ;; + esac + AC_MSG_RESULT([$enable_shared]) + + AC_MSG_CHECKING([whether to build static libraries]) + # Make sure either enable_shared or enable_static is yes. + test "$enable_shared" = yes || enable_static=yes + AC_MSG_RESULT([$enable_static]) + + _LT_TAGVAR(GCC, $1)="$G77" + _LT_TAGVAR(LD, $1)="$LD" + + ## CAVEAT EMPTOR: + ## There is no encapsulation within the following macros, do not change + ## the running order or otherwise move them around unless you know exactly + ## what you are doing... + _LT_COMPILER_PIC($1) + _LT_COMPILER_C_O($1) + _LT_COMPILER_FILE_LOCKS($1) + _LT_LINKER_SHLIBS($1) + _LT_SYS_DYNAMIC_LINKER($1) + _LT_LINKER_HARDCODE_LIBPATH($1) + + _LT_CONFIG($1) + fi # test -n "$compiler" + + GCC=$lt_save_GCC + CC="$lt_save_CC" + CFLAGS="$lt_save_CFLAGS" +fi # test "$_lt_disable_F77" != yes + +AC_LANG_POP +])# _LT_LANG_F77_CONFIG + + +# _LT_LANG_FC_CONFIG([TAG]) +# ------------------------- +# Ensure that the configuration variables for a Fortran compiler are +# suitably defined. These variables are subsequently used by _LT_CONFIG +# to write the compiler configuration to `libtool'. 
+m4_defun([_LT_LANG_FC_CONFIG], +[AC_LANG_PUSH(Fortran) + +if test -z "$FC" || test "X$FC" = "Xno"; then + _lt_disable_FC=yes +fi + +_LT_TAGVAR(archive_cmds_need_lc, $1)=no +_LT_TAGVAR(allow_undefined_flag, $1)= +_LT_TAGVAR(always_export_symbols, $1)=no +_LT_TAGVAR(archive_expsym_cmds, $1)= +_LT_TAGVAR(export_dynamic_flag_spec, $1)= +_LT_TAGVAR(hardcode_direct, $1)=no +_LT_TAGVAR(hardcode_direct_absolute, $1)=no +_LT_TAGVAR(hardcode_libdir_flag_spec, $1)= +_LT_TAGVAR(hardcode_libdir_separator, $1)= +_LT_TAGVAR(hardcode_minus_L, $1)=no +_LT_TAGVAR(hardcode_automatic, $1)=no +_LT_TAGVAR(inherit_rpath, $1)=no +_LT_TAGVAR(module_cmds, $1)= +_LT_TAGVAR(module_expsym_cmds, $1)= +_LT_TAGVAR(link_all_deplibs, $1)=unknown +_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds +_LT_TAGVAR(reload_flag, $1)=$reload_flag +_LT_TAGVAR(reload_cmds, $1)=$reload_cmds +_LT_TAGVAR(no_undefined_flag, $1)= +_LT_TAGVAR(whole_archive_flag_spec, $1)= +_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no + +# Source file extension for fc test sources. +ac_ext=${ac_fc_srcext-f} + +# Object file extension for compiled fc test sources. +objext=o +_LT_TAGVAR(objext, $1)=$objext + +# No sense in running all these tests if we already determined that +# the FC compiler isn't working. Some variables (like enable_shared) +# are currently assumed to apply to all compilers on this platform, +# and will be corrupted by setting them based on a non-working compiler. +if test "$_lt_disable_FC" != yes; then + # Code to be used in simple compile tests + lt_simple_compile_test_code="\ + subroutine t + return + end +" + + # Code to be used in simple link tests + lt_simple_link_test_code="\ + program t + end +" + + # ltmain only uses $CC for tagged configurations so make sure $CC is set. + _LT_TAG_COMPILER + + # save warnings/boilerplate of simple test code + _LT_COMPILER_BOILERPLATE + _LT_LINKER_BOILERPLATE + + # Allow CC to be a program name with arguments. 
+ lt_save_CC="$CC" + lt_save_GCC=$GCC + lt_save_CFLAGS=$CFLAGS + CC=${FC-"f95"} + CFLAGS=$FCFLAGS + compiler=$CC + GCC=$ac_cv_fc_compiler_gnu + + _LT_TAGVAR(compiler, $1)=$CC + _LT_CC_BASENAME([$compiler]) + + if test -n "$compiler"; then + AC_MSG_CHECKING([if libtool supports shared libraries]) + AC_MSG_RESULT([$can_build_shared]) + + AC_MSG_CHECKING([whether to build shared libraries]) + test "$can_build_shared" = "no" && enable_shared=no + + # On AIX, shared libraries and static libraries use the same namespace, and + # are all built from PIC. + case $host_os in + aix3*) + test "$enable_shared" = yes && enable_static=no + if test -n "$RANLIB"; then + archive_cmds="$archive_cmds~\$RANLIB \$lib" + postinstall_cmds='$RANLIB $lib' + fi + ;; + aix[[4-9]]*) + if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then + test "$enable_shared" = yes && enable_static=no + fi + ;; + esac + AC_MSG_RESULT([$enable_shared]) + + AC_MSG_CHECKING([whether to build static libraries]) + # Make sure either enable_shared or enable_static is yes. + test "$enable_shared" = yes || enable_static=yes + AC_MSG_RESULT([$enable_static]) + + _LT_TAGVAR(GCC, $1)="$ac_cv_fc_compiler_gnu" + _LT_TAGVAR(LD, $1)="$LD" + + ## CAVEAT EMPTOR: + ## There is no encapsulation within the following macros, do not change + ## the running order or otherwise move them around unless you know exactly + ## what you are doing... + _LT_SYS_HIDDEN_LIBDEPS($1) + _LT_COMPILER_PIC($1) + _LT_COMPILER_C_O($1) + _LT_COMPILER_FILE_LOCKS($1) + _LT_LINKER_SHLIBS($1) + _LT_SYS_DYNAMIC_LINKER($1) + _LT_LINKER_HARDCODE_LIBPATH($1) + + _LT_CONFIG($1) + fi # test -n "$compiler" + + GCC=$lt_save_GCC + CC=$lt_save_CC + CFLAGS=$lt_save_CFLAGS +fi # test "$_lt_disable_FC" != yes + +AC_LANG_POP +])# _LT_LANG_FC_CONFIG + + +# _LT_LANG_GCJ_CONFIG([TAG]) +# -------------------------- +# Ensure that the configuration variables for the GNU Java Compiler compiler +# are suitably defined. 
These variables are subsequently used by _LT_CONFIG +# to write the compiler configuration to `libtool'. +m4_defun([_LT_LANG_GCJ_CONFIG], +[AC_REQUIRE([LT_PROG_GCJ])dnl +AC_LANG_SAVE + +# Source file extension for Java test sources. +ac_ext=java + +# Object file extension for compiled Java test sources. +objext=o +_LT_TAGVAR(objext, $1)=$objext + +# Code to be used in simple compile tests +lt_simple_compile_test_code="class foo {}" + +# Code to be used in simple link tests +lt_simple_link_test_code='public class conftest { public static void main(String[[]] argv) {}; }' + +# ltmain only uses $CC for tagged configurations so make sure $CC is set. +_LT_TAG_COMPILER + +# save warnings/boilerplate of simple test code +_LT_COMPILER_BOILERPLATE +_LT_LINKER_BOILERPLATE + +# Allow CC to be a program name with arguments. +lt_save_CC=$CC +lt_save_CFLAGS=$CFLAGS +lt_save_GCC=$GCC +GCC=yes +CC=${GCJ-"gcj"} +CFLAGS=$GCJFLAGS +compiler=$CC +_LT_TAGVAR(compiler, $1)=$CC +_LT_TAGVAR(LD, $1)="$LD" +_LT_CC_BASENAME([$compiler]) + +# GCJ did not exist at the time GCC didn't implicitly link libc in. +_LT_TAGVAR(archive_cmds_need_lc, $1)=no + +_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds +_LT_TAGVAR(reload_flag, $1)=$reload_flag +_LT_TAGVAR(reload_cmds, $1)=$reload_cmds + +## CAVEAT EMPTOR: +## There is no encapsulation within the following macros, do not change +## the running order or otherwise move them around unless you know exactly +## what you are doing... +if test -n "$compiler"; then + _LT_COMPILER_NO_RTTI($1) + _LT_COMPILER_PIC($1) + _LT_COMPILER_C_O($1) + _LT_COMPILER_FILE_LOCKS($1) + _LT_LINKER_SHLIBS($1) + _LT_LINKER_HARDCODE_LIBPATH($1) + + _LT_CONFIG($1) +fi + +AC_LANG_RESTORE + +GCC=$lt_save_GCC +CC=$lt_save_CC +CFLAGS=$lt_save_CFLAGS +])# _LT_LANG_GCJ_CONFIG + + +# _LT_LANG_GO_CONFIG([TAG]) +# -------------------------- +# Ensure that the configuration variables for the GNU Go compiler +# are suitably defined. 
These variables are subsequently used by _LT_CONFIG +# to write the compiler configuration to `libtool'. +m4_defun([_LT_LANG_GO_CONFIG], +[AC_REQUIRE([LT_PROG_GO])dnl +AC_LANG_SAVE + +# Source file extension for Go test sources. +ac_ext=go + +# Object file extension for compiled Go test sources. +objext=o +_LT_TAGVAR(objext, $1)=$objext + +# Code to be used in simple compile tests +lt_simple_compile_test_code="package main; func main() { }" + +# Code to be used in simple link tests +lt_simple_link_test_code='package main; func main() { }' + +# ltmain only uses $CC for tagged configurations so make sure $CC is set. +_LT_TAG_COMPILER + +# save warnings/boilerplate of simple test code +_LT_COMPILER_BOILERPLATE +_LT_LINKER_BOILERPLATE + +# Allow CC to be a program name with arguments. +lt_save_CC=$CC +lt_save_CFLAGS=$CFLAGS +lt_save_GCC=$GCC +GCC=yes +CC=${GOC-"gccgo"} +CFLAGS=$GOFLAGS +compiler=$CC +_LT_TAGVAR(compiler, $1)=$CC +_LT_TAGVAR(LD, $1)="$LD" +_LT_CC_BASENAME([$compiler]) + +# Go did not exist at the time GCC didn't implicitly link libc in. +_LT_TAGVAR(archive_cmds_need_lc, $1)=no + +_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds +_LT_TAGVAR(reload_flag, $1)=$reload_flag +_LT_TAGVAR(reload_cmds, $1)=$reload_cmds + +## CAVEAT EMPTOR: +## There is no encapsulation within the following macros, do not change +## the running order or otherwise move them around unless you know exactly +## what you are doing... +if test -n "$compiler"; then + _LT_COMPILER_NO_RTTI($1) + _LT_COMPILER_PIC($1) + _LT_COMPILER_C_O($1) + _LT_COMPILER_FILE_LOCKS($1) + _LT_LINKER_SHLIBS($1) + _LT_LINKER_HARDCODE_LIBPATH($1) + + _LT_CONFIG($1) +fi + +AC_LANG_RESTORE + +GCC=$lt_save_GCC +CC=$lt_save_CC +CFLAGS=$lt_save_CFLAGS +])# _LT_LANG_GO_CONFIG + + +# _LT_LANG_RC_CONFIG([TAG]) +# ------------------------- +# Ensure that the configuration variables for the Windows resource compiler +# are suitably defined. 
These variables are subsequently used by _LT_CONFIG +# to write the compiler configuration to `libtool'. +m4_defun([_LT_LANG_RC_CONFIG], +[AC_REQUIRE([LT_PROG_RC])dnl +AC_LANG_SAVE + +# Source file extension for RC test sources. +ac_ext=rc + +# Object file extension for compiled RC test sources. +objext=o +_LT_TAGVAR(objext, $1)=$objext + +# Code to be used in simple compile tests +lt_simple_compile_test_code='sample MENU { MENUITEM "&Soup", 100, CHECKED }' + +# Code to be used in simple link tests +lt_simple_link_test_code="$lt_simple_compile_test_code" + +# ltmain only uses $CC for tagged configurations so make sure $CC is set. +_LT_TAG_COMPILER + +# save warnings/boilerplate of simple test code +_LT_COMPILER_BOILERPLATE +_LT_LINKER_BOILERPLATE + +# Allow CC to be a program name with arguments. +lt_save_CC="$CC" +lt_save_CFLAGS=$CFLAGS +lt_save_GCC=$GCC +GCC= +CC=${RC-"windres"} +CFLAGS= +compiler=$CC +_LT_TAGVAR(compiler, $1)=$CC +_LT_CC_BASENAME([$compiler]) +_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=yes + +if test -n "$compiler"; then + : + _LT_CONFIG($1) +fi + +GCC=$lt_save_GCC +AC_LANG_RESTORE +CC=$lt_save_CC +CFLAGS=$lt_save_CFLAGS +])# _LT_LANG_RC_CONFIG + + +# LT_PROG_GCJ +# ----------- +AC_DEFUN([LT_PROG_GCJ], +[m4_ifdef([AC_PROG_GCJ], [AC_PROG_GCJ], + [m4_ifdef([A][M_PROG_GCJ], [A][M_PROG_GCJ], + [AC_CHECK_TOOL(GCJ, gcj,) + test "x${GCJFLAGS+set}" = xset || GCJFLAGS="-g -O2" + AC_SUBST(GCJFLAGS)])])[]dnl +]) + +# Old name: +AU_ALIAS([LT_AC_PROG_GCJ], [LT_PROG_GCJ]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([LT_AC_PROG_GCJ], []) + + +# LT_PROG_GO +# ---------- +AC_DEFUN([LT_PROG_GO], +[AC_CHECK_TOOL(GOC, gccgo,) +]) + + +# LT_PROG_RC +# ---------- +AC_DEFUN([LT_PROG_RC], +[AC_CHECK_TOOL(RC, windres,) +]) + +# Old name: +AU_ALIAS([LT_AC_PROG_RC], [LT_PROG_RC]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([LT_AC_PROG_RC], []) + + +# _LT_DECL_EGREP +# -------------- +# If we don't have a new enough Autoconf to choose the best grep +# 
available, choose the one first in the user's PATH. +m4_defun([_LT_DECL_EGREP], +[AC_REQUIRE([AC_PROG_EGREP])dnl +AC_REQUIRE([AC_PROG_FGREP])dnl +test -z "$GREP" && GREP=grep +_LT_DECL([], [GREP], [1], [A grep program that handles long lines]) +_LT_DECL([], [EGREP], [1], [An ERE matcher]) +_LT_DECL([], [FGREP], [1], [A literal string matcher]) +dnl Non-bleeding-edge autoconf doesn't subst GREP, so do it here too +AC_SUBST([GREP]) +]) + + +# _LT_DECL_OBJDUMP +# -------------- +# If we don't have a new enough Autoconf to choose the best objdump +# available, choose the one first in the user's PATH. +m4_defun([_LT_DECL_OBJDUMP], +[AC_CHECK_TOOL(OBJDUMP, objdump, false) +test -z "$OBJDUMP" && OBJDUMP=objdump +_LT_DECL([], [OBJDUMP], [1], [An object symbol dumper]) +AC_SUBST([OBJDUMP]) +]) + +# _LT_DECL_DLLTOOL +# ---------------- +# Ensure DLLTOOL variable is set. +m4_defun([_LT_DECL_DLLTOOL], +[AC_CHECK_TOOL(DLLTOOL, dlltool, false) +test -z "$DLLTOOL" && DLLTOOL=dlltool +_LT_DECL([], [DLLTOOL], [1], [DLL creation program]) +AC_SUBST([DLLTOOL]) +]) + +# _LT_DECL_SED +# ------------ +# Check for a fully-functional sed program, that truncates +# as few characters as possible. Prefer GNU sed if found. +m4_defun([_LT_DECL_SED], +[AC_PROG_SED +test -z "$SED" && SED=sed +Xsed="$SED -e 1s/^X//" +_LT_DECL([], [SED], [1], [A sed program that does not truncate output]) +_LT_DECL([], [Xsed], ["\$SED -e 1s/^X//"], + [Sed that helps us avoid accidentally triggering echo(1) options like -n]) +])# _LT_DECL_SED + +m4_ifndef([AC_PROG_SED], [ +############################################################ +# NOTE: This macro has been submitted for inclusion into # +# GNU Autoconf as AC_PROG_SED. When it is available in # +# a released version of Autoconf we should remove this # +# macro and use it instead. 
# +############################################################ + +m4_defun([AC_PROG_SED], +[AC_MSG_CHECKING([for a sed that does not truncate output]) +AC_CACHE_VAL(lt_cv_path_SED, +[# Loop through the user's path and test for sed and gsed. +# Then use that list of sed's as ones to test for truncation. +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for lt_ac_prog in sed gsed; do + for ac_exec_ext in '' $ac_executable_extensions; do + if $as_executable_p "$as_dir/$lt_ac_prog$ac_exec_ext"; then + lt_ac_sed_list="$lt_ac_sed_list $as_dir/$lt_ac_prog$ac_exec_ext" + fi + done + done +done +IFS=$as_save_IFS +lt_ac_max=0 +lt_ac_count=0 +# Add /usr/xpg4/bin/sed as it is typically found on Solaris +# along with /bin/sed that truncates output. +for lt_ac_sed in $lt_ac_sed_list /usr/xpg4/bin/sed; do + test ! -f $lt_ac_sed && continue + cat /dev/null > conftest.in + lt_ac_count=0 + echo $ECHO_N "0123456789$ECHO_C" >conftest.in + # Check for GNU sed and select it if it is found. 
+ if "$lt_ac_sed" --version 2>&1 < /dev/null | grep 'GNU' > /dev/null; then + lt_cv_path_SED=$lt_ac_sed + break + fi + while true; do + cat conftest.in conftest.in >conftest.tmp + mv conftest.tmp conftest.in + cp conftest.in conftest.nl + echo >>conftest.nl + $lt_ac_sed -e 's/a$//' < conftest.nl >conftest.out || break + cmp -s conftest.out conftest.nl || break + # 10000 chars as input seems more than enough + test $lt_ac_count -gt 10 && break + lt_ac_count=`expr $lt_ac_count + 1` + if test $lt_ac_count -gt $lt_ac_max; then + lt_ac_max=$lt_ac_count + lt_cv_path_SED=$lt_ac_sed + fi + done +done +]) +SED=$lt_cv_path_SED +AC_SUBST([SED]) +AC_MSG_RESULT([$SED]) +])#AC_PROG_SED +])#m4_ifndef + +# Old name: +AU_ALIAS([LT_AC_PROG_SED], [AC_PROG_SED]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([LT_AC_PROG_SED], []) + + +# _LT_CHECK_SHELL_FEATURES +# ------------------------ +# Find out whether the shell is Bourne or XSI compatible, +# or has some other useful features. +m4_defun([_LT_CHECK_SHELL_FEATURES], +[AC_MSG_CHECKING([whether the shell understands some XSI constructs]) +# Try some XSI features +xsi_shell=no +( _lt_dummy="a/b/c" + test "${_lt_dummy##*/},${_lt_dummy%/*},${_lt_dummy#??}"${_lt_dummy%"$_lt_dummy"}, \ + = c,a/b,b/c, \ + && eval 'test $(( 1 + 1 )) -eq 2 \ + && test "${#_lt_dummy}" -eq 5' ) >/dev/null 2>&1 \ + && xsi_shell=yes +AC_MSG_RESULT([$xsi_shell]) +_LT_CONFIG_LIBTOOL_INIT([xsi_shell='$xsi_shell']) + +AC_MSG_CHECKING([whether the shell understands "+="]) +lt_shell_append=no +( foo=bar; set foo baz; eval "$[1]+=\$[2]" && test "$foo" = barbaz ) \ + >/dev/null 2>&1 \ + && lt_shell_append=yes +AC_MSG_RESULT([$lt_shell_append]) +_LT_CONFIG_LIBTOOL_INIT([lt_shell_append='$lt_shell_append']) + +if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then + lt_unset=unset +else + lt_unset=false +fi +_LT_DECL([], [lt_unset], [0], [whether the shell understands "unset"])dnl + +# test EBCDIC or ASCII +case `echo X|tr X '\101'` in + A) # ASCII based 
system + # \n is not interpreted correctly by Solaris 8 /usr/ucb/tr + lt_SP2NL='tr \040 \012' + lt_NL2SP='tr \015\012 \040\040' + ;; + *) # EBCDIC based system + lt_SP2NL='tr \100 \n' + lt_NL2SP='tr \r\n \100\100' + ;; +esac +_LT_DECL([SP2NL], [lt_SP2NL], [1], [turn spaces into newlines])dnl +_LT_DECL([NL2SP], [lt_NL2SP], [1], [turn newlines into spaces])dnl +])# _LT_CHECK_SHELL_FEATURES + + +# _LT_PROG_FUNCTION_REPLACE (FUNCNAME, REPLACEMENT-BODY) +# ------------------------------------------------------ +# In `$cfgfile', look for function FUNCNAME delimited by `^FUNCNAME ()$' and +# '^} FUNCNAME ', and replace its body with REPLACEMENT-BODY. +m4_defun([_LT_PROG_FUNCTION_REPLACE], +[dnl { +sed -e '/^$1 ()$/,/^} # $1 /c\ +$1 ()\ +{\ +m4_bpatsubsts([$2], [$], [\\], [^\([ ]\)], [\\\1]) +} # Extended-shell $1 implementation' "$cfgfile" > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") +test 0 -eq $? || _lt_function_replace_fail=: +]) + + +# _LT_PROG_REPLACE_SHELLFNS +# ------------------------- +# Replace existing portable implementations of several shell functions with +# equivalent extended shell implementations where those features are available.. +m4_defun([_LT_PROG_REPLACE_SHELLFNS], +[if test x"$xsi_shell" = xyes; then + _LT_PROG_FUNCTION_REPLACE([func_dirname], [dnl + case ${1} in + */*) func_dirname_result="${1%/*}${2}" ;; + * ) func_dirname_result="${3}" ;; + esac]) + + _LT_PROG_FUNCTION_REPLACE([func_basename], [dnl + func_basename_result="${1##*/}"]) + + _LT_PROG_FUNCTION_REPLACE([func_dirname_and_basename], [dnl + case ${1} in + */*) func_dirname_result="${1%/*}${2}" ;; + * ) func_dirname_result="${3}" ;; + esac + func_basename_result="${1##*/}"]) + + _LT_PROG_FUNCTION_REPLACE([func_stripname], [dnl + # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are + # positional parameters, so assign one to ordinary parameter first. 
+ func_stripname_result=${3} + func_stripname_result=${func_stripname_result#"${1}"} + func_stripname_result=${func_stripname_result%"${2}"}]) + + _LT_PROG_FUNCTION_REPLACE([func_split_long_opt], [dnl + func_split_long_opt_name=${1%%=*} + func_split_long_opt_arg=${1#*=}]) + + _LT_PROG_FUNCTION_REPLACE([func_split_short_opt], [dnl + func_split_short_opt_arg=${1#??} + func_split_short_opt_name=${1%"$func_split_short_opt_arg"}]) + + _LT_PROG_FUNCTION_REPLACE([func_lo2o], [dnl + case ${1} in + *.lo) func_lo2o_result=${1%.lo}.${objext} ;; + *) func_lo2o_result=${1} ;; + esac]) + + _LT_PROG_FUNCTION_REPLACE([func_xform], [ func_xform_result=${1%.*}.lo]) + + _LT_PROG_FUNCTION_REPLACE([func_arith], [ func_arith_result=$(( $[*] ))]) + + _LT_PROG_FUNCTION_REPLACE([func_len], [ func_len_result=${#1}]) +fi + +if test x"$lt_shell_append" = xyes; then + _LT_PROG_FUNCTION_REPLACE([func_append], [ eval "${1}+=\\${2}"]) + + _LT_PROG_FUNCTION_REPLACE([func_append_quoted], [dnl + func_quote_for_eval "${2}" +dnl m4 expansion turns \\\\ into \\, and then the shell eval turns that into \ + eval "${1}+=\\\\ \\$func_quote_for_eval_result"]) + + # Save a `func_append' function call where possible by direct use of '+=' + sed -e 's%func_append \([[a-zA-Z_]]\{1,\}\) "%\1+="%g' $cfgfile > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") + test 0 -eq $? || _lt_function_replace_fail=: +else + # Save a `func_append' function call even when '+=' is not available + sed -e 's%func_append \([[a-zA-Z_]]\{1,\}\) "%\1="$\1%g' $cfgfile > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") + test 0 -eq $? 
|| _lt_function_replace_fail=: +fi + +if test x"$_lt_function_replace_fail" = x":"; then + AC_MSG_WARN([Unable to substitute extended shell functions in $ofile]) +fi +]) + +# _LT_PATH_CONVERSION_FUNCTIONS +# ----------------------------- +# Determine which file name conversion functions should be used by +# func_to_host_file (and, implicitly, by func_to_host_path). These are needed +# for certain cross-compile configurations and native mingw. +m4_defun([_LT_PATH_CONVERSION_FUNCTIONS], +[AC_REQUIRE([AC_CANONICAL_HOST])dnl +AC_REQUIRE([AC_CANONICAL_BUILD])dnl +AC_MSG_CHECKING([how to convert $build file names to $host format]) +AC_CACHE_VAL(lt_cv_to_host_file_cmd, +[case $host in + *-*-mingw* ) + case $build in + *-*-mingw* ) # actually msys + lt_cv_to_host_file_cmd=func_convert_file_msys_to_w32 + ;; + *-*-cygwin* ) + lt_cv_to_host_file_cmd=func_convert_file_cygwin_to_w32 + ;; + * ) # otherwise, assume *nix + lt_cv_to_host_file_cmd=func_convert_file_nix_to_w32 + ;; + esac + ;; + *-*-cygwin* ) + case $build in + *-*-mingw* ) # actually msys + lt_cv_to_host_file_cmd=func_convert_file_msys_to_cygwin + ;; + *-*-cygwin* ) + lt_cv_to_host_file_cmd=func_convert_file_noop + ;; + * ) # otherwise, assume *nix + lt_cv_to_host_file_cmd=func_convert_file_nix_to_cygwin + ;; + esac + ;; + * ) # unhandled hosts (and "normal" native builds) + lt_cv_to_host_file_cmd=func_convert_file_noop + ;; +esac +]) +to_host_file_cmd=$lt_cv_to_host_file_cmd +AC_MSG_RESULT([$lt_cv_to_host_file_cmd]) +_LT_DECL([to_host_file_cmd], [lt_cv_to_host_file_cmd], + [0], [convert $build file names to $host format])dnl + +AC_MSG_CHECKING([how to convert $build file names to toolchain format]) +AC_CACHE_VAL(lt_cv_to_tool_file_cmd, +[#assume ordinary cross tools, or native build. 
+lt_cv_to_tool_file_cmd=func_convert_file_noop +case $host in + *-*-mingw* ) + case $build in + *-*-mingw* ) # actually msys + lt_cv_to_tool_file_cmd=func_convert_file_msys_to_w32 + ;; + esac + ;; +esac +]) +to_tool_file_cmd=$lt_cv_to_tool_file_cmd +AC_MSG_RESULT([$lt_cv_to_tool_file_cmd]) +_LT_DECL([to_tool_file_cmd], [lt_cv_to_tool_file_cmd], + [0], [convert $build files to toolchain format])dnl +])# _LT_PATH_CONVERSION_FUNCTIONS diff --git a/build/ltoptions.m4 b/build/ltoptions.m4 new file mode 100644 index 00000000..5d9acd8e --- /dev/null +++ b/build/ltoptions.m4 @@ -0,0 +1,384 @@ +# Helper functions for option handling. -*- Autoconf -*- +# +# Copyright (C) 2004, 2005, 2007, 2008, 2009 Free Software Foundation, +# Inc. +# Written by Gary V. Vaughan, 2004 +# +# This file is free software; the Free Software Foundation gives +# unlimited permission to copy and/or distribute it, with or without +# modifications, as long as this notice is preserved. + +# serial 7 ltoptions.m4 + +# This is to help aclocal find these macros, as it can't see m4_define. +AC_DEFUN([LTOPTIONS_VERSION], [m4_if([1])]) + + +# _LT_MANGLE_OPTION(MACRO-NAME, OPTION-NAME) +# ------------------------------------------ +m4_define([_LT_MANGLE_OPTION], +[[_LT_OPTION_]m4_bpatsubst($1__$2, [[^a-zA-Z0-9_]], [_])]) + + +# _LT_SET_OPTION(MACRO-NAME, OPTION-NAME) +# --------------------------------------- +# Set option OPTION-NAME for macro MACRO-NAME, and if there is a +# matching handler defined, dispatch to it. Other OPTION-NAMEs are +# saved as a flag. +m4_define([_LT_SET_OPTION], +[m4_define(_LT_MANGLE_OPTION([$1], [$2]))dnl +m4_ifdef(_LT_MANGLE_DEFUN([$1], [$2]), + _LT_MANGLE_DEFUN([$1], [$2]), + [m4_warning([Unknown $1 option `$2'])])[]dnl +]) + + +# _LT_IF_OPTION(MACRO-NAME, OPTION-NAME, IF-SET, [IF-NOT-SET]) +# ------------------------------------------------------------ +# Execute IF-SET if OPTION is set, IF-NOT-SET otherwise. 
+m4_define([_LT_IF_OPTION], +[m4_ifdef(_LT_MANGLE_OPTION([$1], [$2]), [$3], [$4])]) + + +# _LT_UNLESS_OPTIONS(MACRO-NAME, OPTION-LIST, IF-NOT-SET) +# ------------------------------------------------------- +# Execute IF-NOT-SET unless all options in OPTION-LIST for MACRO-NAME +# are set. +m4_define([_LT_UNLESS_OPTIONS], +[m4_foreach([_LT_Option], m4_split(m4_normalize([$2])), + [m4_ifdef(_LT_MANGLE_OPTION([$1], _LT_Option), + [m4_define([$0_found])])])[]dnl +m4_ifdef([$0_found], [m4_undefine([$0_found])], [$3 +])[]dnl +]) + + +# _LT_SET_OPTIONS(MACRO-NAME, OPTION-LIST) +# ---------------------------------------- +# OPTION-LIST is a space-separated list of Libtool options associated +# with MACRO-NAME. If any OPTION has a matching handler declared with +# LT_OPTION_DEFINE, dispatch to that macro; otherwise complain about +# the unknown option and exit. +m4_defun([_LT_SET_OPTIONS], +[# Set options +m4_foreach([_LT_Option], m4_split(m4_normalize([$2])), + [_LT_SET_OPTION([$1], _LT_Option)]) + +m4_if([$1],[LT_INIT],[ + dnl + dnl Simply set some default values (i.e off) if boolean options were not + dnl specified: + _LT_UNLESS_OPTIONS([LT_INIT], [dlopen], [enable_dlopen=no + ]) + _LT_UNLESS_OPTIONS([LT_INIT], [win32-dll], [enable_win32_dll=no + ]) + dnl + dnl If no reference was made to various pairs of opposing options, then + dnl we run the default mode handler for the pair. For example, if neither + dnl `shared' nor `disable-shared' was passed, we enable building of shared + dnl archives by default: + _LT_UNLESS_OPTIONS([LT_INIT], [shared disable-shared], [_LT_ENABLE_SHARED]) + _LT_UNLESS_OPTIONS([LT_INIT], [static disable-static], [_LT_ENABLE_STATIC]) + _LT_UNLESS_OPTIONS([LT_INIT], [pic-only no-pic], [_LT_WITH_PIC]) + _LT_UNLESS_OPTIONS([LT_INIT], [fast-install disable-fast-install], + [_LT_ENABLE_FAST_INSTALL]) + ]) +])# _LT_SET_OPTIONS + + +## --------------------------------- ## +## Macros to handle LT_INIT options. 
## +## --------------------------------- ## + +# _LT_MANGLE_DEFUN(MACRO-NAME, OPTION-NAME) +# ----------------------------------------- +m4_define([_LT_MANGLE_DEFUN], +[[_LT_OPTION_DEFUN_]m4_bpatsubst(m4_toupper([$1__$2]), [[^A-Z0-9_]], [_])]) + + +# LT_OPTION_DEFINE(MACRO-NAME, OPTION-NAME, CODE) +# ----------------------------------------------- +m4_define([LT_OPTION_DEFINE], +[m4_define(_LT_MANGLE_DEFUN([$1], [$2]), [$3])[]dnl +])# LT_OPTION_DEFINE + + +# dlopen +# ------ +LT_OPTION_DEFINE([LT_INIT], [dlopen], [enable_dlopen=yes +]) + +AU_DEFUN([AC_LIBTOOL_DLOPEN], +[_LT_SET_OPTION([LT_INIT], [dlopen]) +AC_DIAGNOSE([obsolete], +[$0: Remove this warning and the call to _LT_SET_OPTION when you +put the `dlopen' option into LT_INIT's first parameter.]) +]) + +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_LIBTOOL_DLOPEN], []) + + +# win32-dll +# --------- +# Declare package support for building win32 dll's. +LT_OPTION_DEFINE([LT_INIT], [win32-dll], +[enable_win32_dll=yes + +case $host in +*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-cegcc*) + AC_CHECK_TOOL(AS, as, false) + AC_CHECK_TOOL(DLLTOOL, dlltool, false) + AC_CHECK_TOOL(OBJDUMP, objdump, false) + ;; +esac + +test -z "$AS" && AS=as +_LT_DECL([], [AS], [1], [Assembler program])dnl + +test -z "$DLLTOOL" && DLLTOOL=dlltool +_LT_DECL([], [DLLTOOL], [1], [DLL creation program])dnl + +test -z "$OBJDUMP" && OBJDUMP=objdump +_LT_DECL([], [OBJDUMP], [1], [Object dumper program])dnl +])# win32-dll + +AU_DEFUN([AC_LIBTOOL_WIN32_DLL], +[AC_REQUIRE([AC_CANONICAL_HOST])dnl +_LT_SET_OPTION([LT_INIT], [win32-dll]) +AC_DIAGNOSE([obsolete], +[$0: Remove this warning and the call to _LT_SET_OPTION when you +put the `win32-dll' option into LT_INIT's first parameter.]) +]) + +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_LIBTOOL_WIN32_DLL], []) + + +# _LT_ENABLE_SHARED([DEFAULT]) +# ---------------------------- +# implement the --enable-shared flag, and supports the `shared' and +# `disable-shared' LT_INIT 
options. +# DEFAULT is either `yes' or `no'. If omitted, it defaults to `yes'. +m4_define([_LT_ENABLE_SHARED], +[m4_define([_LT_ENABLE_SHARED_DEFAULT], [m4_if($1, no, no, yes)])dnl +AC_ARG_ENABLE([shared], + [AS_HELP_STRING([--enable-shared@<:@=PKGS@:>@], + [build shared libraries @<:@default=]_LT_ENABLE_SHARED_DEFAULT[@:>@])], + [p=${PACKAGE-default} + case $enableval in + yes) enable_shared=yes ;; + no) enable_shared=no ;; + *) + enable_shared=no + # Look at the argument we got. We use all the common list separators. + lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for pkg in $enableval; do + IFS="$lt_save_ifs" + if test "X$pkg" = "X$p"; then + enable_shared=yes + fi + done + IFS="$lt_save_ifs" + ;; + esac], + [enable_shared=]_LT_ENABLE_SHARED_DEFAULT) + + _LT_DECL([build_libtool_libs], [enable_shared], [0], + [Whether or not to build shared libraries]) +])# _LT_ENABLE_SHARED + +LT_OPTION_DEFINE([LT_INIT], [shared], [_LT_ENABLE_SHARED([yes])]) +LT_OPTION_DEFINE([LT_INIT], [disable-shared], [_LT_ENABLE_SHARED([no])]) + +# Old names: +AC_DEFUN([AC_ENABLE_SHARED], +[_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[shared]) +]) + +AC_DEFUN([AC_DISABLE_SHARED], +[_LT_SET_OPTION([LT_INIT], [disable-shared]) +]) + +AU_DEFUN([AM_ENABLE_SHARED], [AC_ENABLE_SHARED($@)]) +AU_DEFUN([AM_DISABLE_SHARED], [AC_DISABLE_SHARED($@)]) + +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AM_ENABLE_SHARED], []) +dnl AC_DEFUN([AM_DISABLE_SHARED], []) + + + +# _LT_ENABLE_STATIC([DEFAULT]) +# ---------------------------- +# implement the --enable-static flag, and support the `static' and +# `disable-static' LT_INIT options. +# DEFAULT is either `yes' or `no'. If omitted, it defaults to `yes'. 
+m4_define([_LT_ENABLE_STATIC], +[m4_define([_LT_ENABLE_STATIC_DEFAULT], [m4_if($1, no, no, yes)])dnl +AC_ARG_ENABLE([static], + [AS_HELP_STRING([--enable-static@<:@=PKGS@:>@], + [build static libraries @<:@default=]_LT_ENABLE_STATIC_DEFAULT[@:>@])], + [p=${PACKAGE-default} + case $enableval in + yes) enable_static=yes ;; + no) enable_static=no ;; + *) + enable_static=no + # Look at the argument we got. We use all the common list separators. + lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for pkg in $enableval; do + IFS="$lt_save_ifs" + if test "X$pkg" = "X$p"; then + enable_static=yes + fi + done + IFS="$lt_save_ifs" + ;; + esac], + [enable_static=]_LT_ENABLE_STATIC_DEFAULT) + + _LT_DECL([build_old_libs], [enable_static], [0], + [Whether or not to build static libraries]) +])# _LT_ENABLE_STATIC + +LT_OPTION_DEFINE([LT_INIT], [static], [_LT_ENABLE_STATIC([yes])]) +LT_OPTION_DEFINE([LT_INIT], [disable-static], [_LT_ENABLE_STATIC([no])]) + +# Old names: +AC_DEFUN([AC_ENABLE_STATIC], +[_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[static]) +]) + +AC_DEFUN([AC_DISABLE_STATIC], +[_LT_SET_OPTION([LT_INIT], [disable-static]) +]) + +AU_DEFUN([AM_ENABLE_STATIC], [AC_ENABLE_STATIC($@)]) +AU_DEFUN([AM_DISABLE_STATIC], [AC_DISABLE_STATIC($@)]) + +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AM_ENABLE_STATIC], []) +dnl AC_DEFUN([AM_DISABLE_STATIC], []) + + + +# _LT_ENABLE_FAST_INSTALL([DEFAULT]) +# ---------------------------------- +# implement the --enable-fast-install flag, and support the `fast-install' +# and `disable-fast-install' LT_INIT options. +# DEFAULT is either `yes' or `no'. If omitted, it defaults to `yes'. 
+m4_define([_LT_ENABLE_FAST_INSTALL], +[m4_define([_LT_ENABLE_FAST_INSTALL_DEFAULT], [m4_if($1, no, no, yes)])dnl +AC_ARG_ENABLE([fast-install], + [AS_HELP_STRING([--enable-fast-install@<:@=PKGS@:>@], + [optimize for fast installation @<:@default=]_LT_ENABLE_FAST_INSTALL_DEFAULT[@:>@])], + [p=${PACKAGE-default} + case $enableval in + yes) enable_fast_install=yes ;; + no) enable_fast_install=no ;; + *) + enable_fast_install=no + # Look at the argument we got. We use all the common list separators. + lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for pkg in $enableval; do + IFS="$lt_save_ifs" + if test "X$pkg" = "X$p"; then + enable_fast_install=yes + fi + done + IFS="$lt_save_ifs" + ;; + esac], + [enable_fast_install=]_LT_ENABLE_FAST_INSTALL_DEFAULT) + +_LT_DECL([fast_install], [enable_fast_install], [0], + [Whether or not to optimize for fast installation])dnl +])# _LT_ENABLE_FAST_INSTALL + +LT_OPTION_DEFINE([LT_INIT], [fast-install], [_LT_ENABLE_FAST_INSTALL([yes])]) +LT_OPTION_DEFINE([LT_INIT], [disable-fast-install], [_LT_ENABLE_FAST_INSTALL([no])]) + +# Old names: +AU_DEFUN([AC_ENABLE_FAST_INSTALL], +[_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[fast-install]) +AC_DIAGNOSE([obsolete], +[$0: Remove this warning and the call to _LT_SET_OPTION when you put +the `fast-install' option into LT_INIT's first parameter.]) +]) + +AU_DEFUN([AC_DISABLE_FAST_INSTALL], +[_LT_SET_OPTION([LT_INIT], [disable-fast-install]) +AC_DIAGNOSE([obsolete], +[$0: Remove this warning and the call to _LT_SET_OPTION when you put +the `disable-fast-install' option into LT_INIT's first parameter.]) +]) + +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_ENABLE_FAST_INSTALL], []) +dnl AC_DEFUN([AM_DISABLE_FAST_INSTALL], []) + + +# _LT_WITH_PIC([MODE]) +# -------------------- +# implement the --with-pic flag, and support the `pic-only' and `no-pic' +# LT_INIT options. +# MODE is either `yes' or `no'. If omitted, it defaults to `both'. 
+m4_define([_LT_WITH_PIC], +[AC_ARG_WITH([pic], + [AS_HELP_STRING([--with-pic@<:@=PKGS@:>@], + [try to use only PIC/non-PIC objects @<:@default=use both@:>@])], + [lt_p=${PACKAGE-default} + case $withval in + yes|no) pic_mode=$withval ;; + *) + pic_mode=default + # Look at the argument we got. We use all the common list separators. + lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for lt_pkg in $withval; do + IFS="$lt_save_ifs" + if test "X$lt_pkg" = "X$lt_p"; then + pic_mode=yes + fi + done + IFS="$lt_save_ifs" + ;; + esac], + [pic_mode=default]) + +test -z "$pic_mode" && pic_mode=m4_default([$1], [default]) + +_LT_DECL([], [pic_mode], [0], [What type of objects to build])dnl +])# _LT_WITH_PIC + +LT_OPTION_DEFINE([LT_INIT], [pic-only], [_LT_WITH_PIC([yes])]) +LT_OPTION_DEFINE([LT_INIT], [no-pic], [_LT_WITH_PIC([no])]) + +# Old name: +AU_DEFUN([AC_LIBTOOL_PICMODE], +[_LT_SET_OPTION([LT_INIT], [pic-only]) +AC_DIAGNOSE([obsolete], +[$0: Remove this warning and the call to _LT_SET_OPTION when you +put the `pic-only' option into LT_INIT's first parameter.]) +]) + +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_LIBTOOL_PICMODE], []) + +## ----------------- ## +## LTDL_INIT Options ## +## ----------------- ## + +m4_define([_LTDL_MODE], []) +LT_OPTION_DEFINE([LTDL_INIT], [nonrecursive], + [m4_define([_LTDL_MODE], [nonrecursive])]) +LT_OPTION_DEFINE([LTDL_INIT], [recursive], + [m4_define([_LTDL_MODE], [recursive])]) +LT_OPTION_DEFINE([LTDL_INIT], [subproject], + [m4_define([_LTDL_MODE], [subproject])]) + +m4_define([_LTDL_TYPE], []) +LT_OPTION_DEFINE([LTDL_INIT], [installable], + [m4_define([_LTDL_TYPE], [installable])]) +LT_OPTION_DEFINE([LTDL_INIT], [convenience], + [m4_define([_LTDL_TYPE], [convenience])]) diff --git a/build/ltsugar.m4 b/build/ltsugar.m4 new file mode 100644 index 00000000..9000a057 --- /dev/null +++ b/build/ltsugar.m4 @@ -0,0 +1,123 @@ +# ltsugar.m4 -- libtool m4 base layer. 
-*-Autoconf-*- +# +# Copyright (C) 2004, 2005, 2007, 2008 Free Software Foundation, Inc. +# Written by Gary V. Vaughan, 2004 +# +# This file is free software; the Free Software Foundation gives +# unlimited permission to copy and/or distribute it, with or without +# modifications, as long as this notice is preserved. + +# serial 6 ltsugar.m4 + +# This is to help aclocal find these macros, as it can't see m4_define. +AC_DEFUN([LTSUGAR_VERSION], [m4_if([0.1])]) + + +# lt_join(SEP, ARG1, [ARG2...]) +# ----------------------------- +# Produce ARG1SEPARG2...SEPARGn, omitting [] arguments and their +# associated separator. +# Needed until we can rely on m4_join from Autoconf 2.62, since all earlier +# versions in m4sugar had bugs. +m4_define([lt_join], +[m4_if([$#], [1], [], + [$#], [2], [[$2]], + [m4_if([$2], [], [], [[$2]_])$0([$1], m4_shift(m4_shift($@)))])]) +m4_define([_lt_join], +[m4_if([$#$2], [2], [], + [m4_if([$2], [], [], [[$1$2]])$0([$1], m4_shift(m4_shift($@)))])]) + + +# lt_car(LIST) +# lt_cdr(LIST) +# ------------ +# Manipulate m4 lists. +# These macros are necessary as long as will still need to support +# Autoconf-2.59 which quotes differently. +m4_define([lt_car], [[$1]]) +m4_define([lt_cdr], +[m4_if([$#], 0, [m4_fatal([$0: cannot be called without arguments])], + [$#], 1, [], + [m4_dquote(m4_shift($@))])]) +m4_define([lt_unquote], $1) + + +# lt_append(MACRO-NAME, STRING, [SEPARATOR]) +# ------------------------------------------ +# Redefine MACRO-NAME to hold its former content plus `SEPARATOR'`STRING'. +# Note that neither SEPARATOR nor STRING are expanded; they are appended +# to MACRO-NAME as is (leaving the expansion for when MACRO-NAME is invoked). +# No SEPARATOR is output if MACRO-NAME was previously undefined (different +# than defined and empty). +# +# This macro is needed until we can rely on Autoconf 2.62, since earlier +# versions of m4sugar mistakenly expanded SEPARATOR but not STRING. 
+m4_define([lt_append], +[m4_define([$1], + m4_ifdef([$1], [m4_defn([$1])[$3]])[$2])]) + + + +# lt_combine(SEP, PREFIX-LIST, INFIX, SUFFIX1, [SUFFIX2...]) +# ---------------------------------------------------------- +# Produce a SEP delimited list of all paired combinations of elements of +# PREFIX-LIST with SUFFIX1 through SUFFIXn. Each element of the list +# has the form PREFIXmINFIXSUFFIXn. +# Needed until we can rely on m4_combine added in Autoconf 2.62. +m4_define([lt_combine], +[m4_if(m4_eval([$# > 3]), [1], + [m4_pushdef([_Lt_sep], [m4_define([_Lt_sep], m4_defn([lt_car]))])]]dnl +[[m4_foreach([_Lt_prefix], [$2], + [m4_foreach([_Lt_suffix], + ]m4_dquote(m4_dquote(m4_shift(m4_shift(m4_shift($@)))))[, + [_Lt_sep([$1])[]m4_defn([_Lt_prefix])[$3]m4_defn([_Lt_suffix])])])])]) + + +# lt_if_append_uniq(MACRO-NAME, VARNAME, [SEPARATOR], [UNIQ], [NOT-UNIQ]) +# ----------------------------------------------------------------------- +# Iff MACRO-NAME does not yet contain VARNAME, then append it (delimited +# by SEPARATOR if supplied) and expand UNIQ, else NOT-UNIQ. 
+m4_define([lt_if_append_uniq], +[m4_ifdef([$1], + [m4_if(m4_index([$3]m4_defn([$1])[$3], [$3$2$3]), [-1], + [lt_append([$1], [$2], [$3])$4], + [$5])], + [lt_append([$1], [$2], [$3])$4])]) + + +# lt_dict_add(DICT, KEY, VALUE) +# ----------------------------- +m4_define([lt_dict_add], +[m4_define([$1($2)], [$3])]) + + +# lt_dict_add_subkey(DICT, KEY, SUBKEY, VALUE) +# -------------------------------------------- +m4_define([lt_dict_add_subkey], +[m4_define([$1($2:$3)], [$4])]) + + +# lt_dict_fetch(DICT, KEY, [SUBKEY]) +# ---------------------------------- +m4_define([lt_dict_fetch], +[m4_ifval([$3], + m4_ifdef([$1($2:$3)], [m4_defn([$1($2:$3)])]), + m4_ifdef([$1($2)], [m4_defn([$1($2)])]))]) + + +# lt_if_dict_fetch(DICT, KEY, [SUBKEY], VALUE, IF-TRUE, [IF-FALSE]) +# ----------------------------------------------------------------- +m4_define([lt_if_dict_fetch], +[m4_if(lt_dict_fetch([$1], [$2], [$3]), [$4], + [$5], + [$6])]) + + +# lt_dict_filter(DICT, [SUBKEY], VALUE, [SEPARATOR], KEY, [...]) +# -------------------------------------------------------------- +m4_define([lt_dict_filter], +[m4_if([$5], [], [], + [lt_join(m4_quote(m4_default([$4], [[, ]])), + lt_unquote(m4_split(m4_normalize(m4_foreach(_Lt_key, lt_car([m4_shiftn(4, $@)]), + [lt_if_dict_fetch([$1], _Lt_key, [$2], [$3], [_Lt_key ])])))))])[]dnl +]) diff --git a/build/ltversion.m4 b/build/ltversion.m4 new file mode 100644 index 00000000..07a8602d --- /dev/null +++ b/build/ltversion.m4 @@ -0,0 +1,23 @@ +# ltversion.m4 -- version numbers -*- Autoconf -*- +# +# Copyright (C) 2004 Free Software Foundation, Inc. +# Written by Scott James Remnant, 2004 +# +# This file is free software; the Free Software Foundation gives +# unlimited permission to copy and/or distribute it, with or without +# modifications, as long as this notice is preserved. 
+ +# @configure_input@ + +# serial 3337 ltversion.m4 +# This file is part of GNU Libtool + +m4_define([LT_PACKAGE_VERSION], [2.4.2]) +m4_define([LT_PACKAGE_REVISION], [1.3337]) + +AC_DEFUN([LTVERSION_VERSION], +[macro_version='2.4.2' +macro_revision='1.3337' +_LT_DECL(, macro_version, 0, [Which release of libtool.m4 was used?]) +_LT_DECL(, macro_revision, 0) +]) diff --git a/build/lt~obsolete.m4 b/build/lt~obsolete.m4 new file mode 100644 index 00000000..c573da90 --- /dev/null +++ b/build/lt~obsolete.m4 @@ -0,0 +1,98 @@ +# lt~obsolete.m4 -- aclocal satisfying obsolete definitions. -*-Autoconf-*- +# +# Copyright (C) 2004, 2005, 2007, 2009 Free Software Foundation, Inc. +# Written by Scott James Remnant, 2004. +# +# This file is free software; the Free Software Foundation gives +# unlimited permission to copy and/or distribute it, with or without +# modifications, as long as this notice is preserved. + +# serial 5 lt~obsolete.m4 + +# These exist entirely to fool aclocal when bootstrapping libtool. +# +# In the past libtool.m4 has provided macros via AC_DEFUN (or AU_DEFUN) +# which have later been changed to m4_define as they aren't part of the +# exported API, or moved to Autoconf or Automake where they belong. +# +# The trouble is, aclocal is a bit thick. It'll see the old AC_DEFUN +# in /usr/share/aclocal/libtool.m4 and remember it, then when it sees us +# using a macro with the same name in our local m4/libtool.m4 it'll +# pull the old libtool.m4 in (it doesn't see our shiny new m4_define +# and doesn't know about Autoconf macros at all.) +# +# So we provide this file, which has a silly filename so it's always +# included after everything else. This provides aclocal with the +# AC_DEFUNs it wants, but when m4 processes it, it doesn't do anything +# because those macros already exist, or will be overwritten later. +# We use AC_DEFUN over AU_DEFUN for compatibility with aclocal-1.6. +# +# Anytime we withdraw an AC_DEFUN or AU_DEFUN, remember to add it here. 
+# Yes, that means every name once taken will need to remain here until +# we give up compatibility with versions before 1.7, at which point +# we need to keep only those names which we still refer to. + +# This is to help aclocal find these macros, as it can't see m4_define. +AC_DEFUN([LTOBSOLETE_VERSION], [m4_if([1])]) + +m4_ifndef([AC_LIBTOOL_LINKER_OPTION], [AC_DEFUN([AC_LIBTOOL_LINKER_OPTION])]) +m4_ifndef([AC_PROG_EGREP], [AC_DEFUN([AC_PROG_EGREP])]) +m4_ifndef([_LT_AC_PROG_ECHO_BACKSLASH], [AC_DEFUN([_LT_AC_PROG_ECHO_BACKSLASH])]) +m4_ifndef([_LT_AC_SHELL_INIT], [AC_DEFUN([_LT_AC_SHELL_INIT])]) +m4_ifndef([_LT_AC_SYS_LIBPATH_AIX], [AC_DEFUN([_LT_AC_SYS_LIBPATH_AIX])]) +m4_ifndef([_LT_PROG_LTMAIN], [AC_DEFUN([_LT_PROG_LTMAIN])]) +m4_ifndef([_LT_AC_TAGVAR], [AC_DEFUN([_LT_AC_TAGVAR])]) +m4_ifndef([AC_LTDL_ENABLE_INSTALL], [AC_DEFUN([AC_LTDL_ENABLE_INSTALL])]) +m4_ifndef([AC_LTDL_PREOPEN], [AC_DEFUN([AC_LTDL_PREOPEN])]) +m4_ifndef([_LT_AC_SYS_COMPILER], [AC_DEFUN([_LT_AC_SYS_COMPILER])]) +m4_ifndef([_LT_AC_LOCK], [AC_DEFUN([_LT_AC_LOCK])]) +m4_ifndef([AC_LIBTOOL_SYS_OLD_ARCHIVE], [AC_DEFUN([AC_LIBTOOL_SYS_OLD_ARCHIVE])]) +m4_ifndef([_LT_AC_TRY_DLOPEN_SELF], [AC_DEFUN([_LT_AC_TRY_DLOPEN_SELF])]) +m4_ifndef([AC_LIBTOOL_PROG_CC_C_O], [AC_DEFUN([AC_LIBTOOL_PROG_CC_C_O])]) +m4_ifndef([AC_LIBTOOL_SYS_HARD_LINK_LOCKS], [AC_DEFUN([AC_LIBTOOL_SYS_HARD_LINK_LOCKS])]) +m4_ifndef([AC_LIBTOOL_OBJDIR], [AC_DEFUN([AC_LIBTOOL_OBJDIR])]) +m4_ifndef([AC_LTDL_OBJDIR], [AC_DEFUN([AC_LTDL_OBJDIR])]) +m4_ifndef([AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH], [AC_DEFUN([AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH])]) +m4_ifndef([AC_LIBTOOL_SYS_LIB_STRIP], [AC_DEFUN([AC_LIBTOOL_SYS_LIB_STRIP])]) +m4_ifndef([AC_PATH_MAGIC], [AC_DEFUN([AC_PATH_MAGIC])]) +m4_ifndef([AC_PROG_LD_GNU], [AC_DEFUN([AC_PROG_LD_GNU])]) +m4_ifndef([AC_PROG_LD_RELOAD_FLAG], [AC_DEFUN([AC_PROG_LD_RELOAD_FLAG])]) +m4_ifndef([AC_DEPLIBS_CHECK_METHOD], [AC_DEFUN([AC_DEPLIBS_CHECK_METHOD])]) 
+m4_ifndef([AC_LIBTOOL_PROG_COMPILER_NO_RTTI], [AC_DEFUN([AC_LIBTOOL_PROG_COMPILER_NO_RTTI])]) +m4_ifndef([AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE], [AC_DEFUN([AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE])]) +m4_ifndef([AC_LIBTOOL_PROG_COMPILER_PIC], [AC_DEFUN([AC_LIBTOOL_PROG_COMPILER_PIC])]) +m4_ifndef([AC_LIBTOOL_PROG_LD_SHLIBS], [AC_DEFUN([AC_LIBTOOL_PROG_LD_SHLIBS])]) +m4_ifndef([AC_LIBTOOL_POSTDEP_PREDEP], [AC_DEFUN([AC_LIBTOOL_POSTDEP_PREDEP])]) +m4_ifndef([LT_AC_PROG_EGREP], [AC_DEFUN([LT_AC_PROG_EGREP])]) +m4_ifndef([LT_AC_PROG_SED], [AC_DEFUN([LT_AC_PROG_SED])]) +m4_ifndef([_LT_CC_BASENAME], [AC_DEFUN([_LT_CC_BASENAME])]) +m4_ifndef([_LT_COMPILER_BOILERPLATE], [AC_DEFUN([_LT_COMPILER_BOILERPLATE])]) +m4_ifndef([_LT_LINKER_BOILERPLATE], [AC_DEFUN([_LT_LINKER_BOILERPLATE])]) +m4_ifndef([_AC_PROG_LIBTOOL], [AC_DEFUN([_AC_PROG_LIBTOOL])]) +m4_ifndef([AC_LIBTOOL_SETUP], [AC_DEFUN([AC_LIBTOOL_SETUP])]) +m4_ifndef([_LT_AC_CHECK_DLFCN], [AC_DEFUN([_LT_AC_CHECK_DLFCN])]) +m4_ifndef([AC_LIBTOOL_SYS_DYNAMIC_LINKER], [AC_DEFUN([AC_LIBTOOL_SYS_DYNAMIC_LINKER])]) +m4_ifndef([_LT_AC_TAGCONFIG], [AC_DEFUN([_LT_AC_TAGCONFIG])]) +m4_ifndef([AC_DISABLE_FAST_INSTALL], [AC_DEFUN([AC_DISABLE_FAST_INSTALL])]) +m4_ifndef([_LT_AC_LANG_CXX], [AC_DEFUN([_LT_AC_LANG_CXX])]) +m4_ifndef([_LT_AC_LANG_F77], [AC_DEFUN([_LT_AC_LANG_F77])]) +m4_ifndef([_LT_AC_LANG_GCJ], [AC_DEFUN([_LT_AC_LANG_GCJ])]) +m4_ifndef([AC_LIBTOOL_LANG_C_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_C_CONFIG])]) +m4_ifndef([_LT_AC_LANG_C_CONFIG], [AC_DEFUN([_LT_AC_LANG_C_CONFIG])]) +m4_ifndef([AC_LIBTOOL_LANG_CXX_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_CXX_CONFIG])]) +m4_ifndef([_LT_AC_LANG_CXX_CONFIG], [AC_DEFUN([_LT_AC_LANG_CXX_CONFIG])]) +m4_ifndef([AC_LIBTOOL_LANG_F77_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_F77_CONFIG])]) +m4_ifndef([_LT_AC_LANG_F77_CONFIG], [AC_DEFUN([_LT_AC_LANG_F77_CONFIG])]) +m4_ifndef([AC_LIBTOOL_LANG_GCJ_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_GCJ_CONFIG])]) +m4_ifndef([_LT_AC_LANG_GCJ_CONFIG], 
[AC_DEFUN([_LT_AC_LANG_GCJ_CONFIG])]) +m4_ifndef([AC_LIBTOOL_LANG_RC_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_RC_CONFIG])]) +m4_ifndef([_LT_AC_LANG_RC_CONFIG], [AC_DEFUN([_LT_AC_LANG_RC_CONFIG])]) +m4_ifndef([AC_LIBTOOL_CONFIG], [AC_DEFUN([AC_LIBTOOL_CONFIG])]) +m4_ifndef([_LT_AC_FILE_LTDLL_C], [AC_DEFUN([_LT_AC_FILE_LTDLL_C])]) +m4_ifndef([_LT_REQUIRED_DARWIN_CHECKS], [AC_DEFUN([_LT_REQUIRED_DARWIN_CHECKS])]) +m4_ifndef([_LT_AC_PROG_CXXCPP], [AC_DEFUN([_LT_AC_PROG_CXXCPP])]) +m4_ifndef([_LT_PREPARE_SED_QUOTE_VARS], [AC_DEFUN([_LT_PREPARE_SED_QUOTE_VARS])]) +m4_ifndef([_LT_PROG_ECHO_BACKSLASH], [AC_DEFUN([_LT_PROG_ECHO_BACKSLASH])]) +m4_ifndef([_LT_PROG_F77], [AC_DEFUN([_LT_PROG_F77])]) +m4_ifndef([_LT_PROG_FC], [AC_DEFUN([_LT_PROG_FC])]) +m4_ifndef([_LT_PROG_CXX], [AC_DEFUN([_LT_PROG_CXX])]) diff --git a/build/lzma.m4 b/build/lzma.m4 new file mode 100644 index 00000000..0be77790 --- /dev/null +++ b/build/lzma.m4 @@ -0,0 +1,100 @@ +dnl -------------------------------------------------------- -*- autoconf -*- +dnl Licensed to the Apache Software Foundation (ASF) under one or more +dnl contributor license agreements. See the NOTICE file distributed with +dnl this work for additional information regarding copyright ownership. +dnl The ASF licenses this file to You under the Apache License, Version 2.0 +dnl (the "License"); you may not use this file except in compliance with +dnl the License. You may obtain a copy of the License at +dnl +dnl http://www.apache.org/licenses/LICENSE-2.0 +dnl +dnl Unless required by applicable law or agreed to in writing, software +dnl distributed under the License is distributed on an "AS IS" BASIS, +dnl WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +dnl See the License for the specific language governing permissions and +dnl limitations under the License. 
+ +dnl +dnl lzma.m4: Trafficserver's lzma autoconf macros +dnl + +dnl +dnl TS_CHECK_LZMA: look for lzma libraries and headers +dnl +AC_DEFUN([TS_CHECK_LZMA], [ +enable_lzma=no +AC_ARG_WITH(lzma, [AC_HELP_STRING([--with-lzma=DIR],[use a specific lzma library])], +[ + if test "x$withval" != "xyes" && test "x$withval" != "x"; then + lzma_base_dir="$withval" + if test "$withval" != "no"; then + enable_lzma=yes + case "$withval" in + *":"*) + lzma_include="`echo $withval |sed -e 's/:.*$//'`" + lzma_ldflags="`echo $withval |sed -e 's/^.*://'`" + AC_MSG_CHECKING(checking for lzma includes in $lzma_include libs in $lzma_ldflags ) + ;; + *) + lzma_include="$withval/include" + lzma_ldflags="$withval/lib" + AC_MSG_CHECKING(checking for lzma includes in $withval) + ;; + esac + fi + fi +]) + +if test "x$lzma_base_dir" = "x"; then + AC_MSG_CHECKING([for lzma location]) + AC_CACHE_VAL(ats_cv_lzma_dir,[ + for dir in /usr/local /usr ; do + if test -d $dir && test -f $dir/include/lzma.h; then + ats_cv_lzma_dir=$dir + break + fi + done + ]) + lzma_base_dir=$ats_cv_lzma_dir + if test "x$lzma_base_dir" = "x"; then + enable_lzma=no + AC_MSG_RESULT([not found]) + else + enable_lzma=yes + lzma_include="$lzma_base_dir/include" + lzma_ldflags="$lzma_base_dir/lib" + AC_MSG_RESULT([$lzma_base_dir]) + fi +else + if test -d $lzma_include && test -d $lzma_ldflags && test -f $lzma_include/lzma.h; then + AC_MSG_RESULT([ok]) + else + AC_MSG_RESULT([not found]) + fi +fi + +lzmah=0 +if test "$enable_lzma" != "no"; then + saved_ldflags=$LDFLAGS + saved_cppflags=$CPPFLAGS + lzma_have_headers=0 + lzma_have_libs=0 + if test "$lzma_base_dir" != "/usr"; then + TS_ADDTO(CPPFLAGS, [-I${lzma_include}]) + TS_ADDTO(LDFLAGS, [-L${lzma_ldflags}]) + TS_ADDTO(LIBTOOL_LINK_FLAGS, [-R${lzma_ldflags}]) + fi + AC_SEARCH_LIBS([lzma_code], [lzma], [lzma_have_libs=1]) + if test "$lzma_have_libs" != "0"; then + TS_FLAG_HEADERS(lzma.h, [lzma_have_headers=1]) + fi + if test "$lzma_have_headers" != "0"; then + 
AC_SUBST(LIBLZMA, [-llzma]) + else + enable_lzma=no + CPPFLAGS=$saved_cppflags + LDFLAGS=$saved_ldflags + fi +fi +AC_SUBST(lzmah) +]) diff --git a/build/network.m4 b/build/network.m4 new file mode 100644 index 00000000..a71390e7 --- /dev/null +++ b/build/network.m4 @@ -0,0 +1,125 @@ +dnl -------------------------------------------------------- -*- autoconf -*- +dnl Licensed to the Apache Software Foundation (ASF) under one or more +dnl contributor license agreements. See the NOTICE file distributed with +dnl this work for additional information regarding copyright ownership. +dnl The ASF licenses this file to You under the Apache License, Version 2.0 +dnl (the "License"); you may not use this file except in compliance with +dnl the License. You may obtain a copy of the License at +dnl +dnl http://www.apache.org/licenses/LICENSE-2.0 +dnl +dnl Unless required by applicable law or agreed to in writing, software +dnl distributed under the License is distributed on an "AS IS" BASIS, +dnl WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +dnl See the License for the specific language governing permissions and +dnl limitations under the License. + +dnl ----------------------------------------------------------------- +dnl network.m4: Trafficserver's autoconf macros for testing network support +dnl + +dnl +dnl Checks the definition of gethostbyname_r and gethostbyaddr_r +dnl which are different for glibc, solaris and assorted other operating +dnl systems +dnl +dnl Note that this test is executed too early to see if we have all of +dnl the headers. 
+AC_DEFUN([TS_CHECK_GETHOSTBYNAME_R_STYLE], [ + +dnl Try and compile a glibc2 gethostbyname_r piece of code, and set the +dnl style of the routines to glibc2 on success +AC_CACHE_CHECK([style of gethostbyname_r routine], ac_cv_gethostbyname_r_style, +TS_TRY_COMPILE_NO_WARNING([ +#ifdef HAVE_SYS_TYPES_H +#include <sys/types.h> +#endif +#ifdef HAVE_NETINET_IN_H +#include <netinet/in.h> +#endif +#ifdef HAVE_ARPA_INET_H +#include <arpa/inet.h> +#endif +#ifdef HAVE_NETDB_H +#include <netdb.h> +#endif +#ifdef HAVE_STDLIB_H +#include <stdlib.h> +#endif +],[ +int tmp = gethostbyname_r((const char *) 0, (struct hostent *) 0, + (char *) 0, 0, (struct hostent **) 0, &tmp); +/* use tmp to suppress the warning */ +tmp=0; +], ac_cv_gethostbyname_r_style=glibc2, ac_cv_gethostbyname_r_style=none)) + +if test "$ac_cv_gethostbyname_r_style" = "glibc2"; then + gethostbyname_r_glibc2=1 + AC_DEFINE(GETHOSTBYNAME_R_GLIBC2, 1, [Define if gethostbyname_r has the glibc style]) +else + gethostbyname_r_glibc2=0 +fi + +AC_CACHE_CHECK([3rd argument to the gethostbyname_r routines], ac_cv_gethostbyname_r_arg, +TS_TRY_COMPILE_NO_WARNING([ +#ifdef HAVE_SYS_TYPES_H +#include <sys/types.h> +#endif +#ifdef HAVE_NETINET_IN_H +#include <netinet/in.h> +#endif +#ifdef HAVE_ARPA_INET_H +#include <arpa/inet.h> +#endif +#ifdef HAVE_NETDB_H +#include <netdb.h> +#endif +#ifdef HAVE_STDLIB_H +#include <stdlib.h> +#endif +],[ +int tmp = gethostbyname_r((const char *) 0, (struct hostent *) 0, + (struct hostent_data *) 0); +/* use tmp to suppress the warning */ +tmp=0; +], ac_cv_gethostbyname_r_arg=hostent_data, ac_cv_gethostbyname_r_arg=char)) + +AC_SUBST(gethostbyname_r_glibc2) +]) + +dnl +dnl TS_CHECK_LOOPBACK_IFACE: try to figure out default loopback interface +dnl +AC_DEFUN([TS_CHECK_LOOPBACK_IFACE], [ +default_loopback_iface="" +AC_MSG_CHECKING([for loopback network interface]) +case $host_os in + linux*) + default_loopback_iface=lo + ;; +darwin* | freebsd* | solaris*) + default_loopback_iface=lo0 + ;; +esac +AC_MSG_RESULT([$default_loopback_iface]) +AC_SUBST([default_loopback_iface]) +]) + +dnl +dnl Check on 
IN6_IS_ADDR_UNSPECIFIED. We can't just check if it is defined +dnl because some releases of FreeBSD and Solaris define it incorrectly. +dnl +AC_DEFUN([TS_CHECK_MACRO_IN6_IS_ADDR_UNSPECIFIED], [ +AC_CACHE_CHECK([IN6_IS_ADDR_UNSPECIFIED macro works], ac_cv_macro_in6_is_addr_unspecified, +TS_TRY_COMPILE_NO_WARNING([ +#include <netinet/in.h> +],[ + (void) IN6_IS_ADDR_UNSPECIFIED(0); +], ac_cv_macro_in6_is_addr_unspecified=yes, ac_cv_macro_in6_is_addr_unspecified=no)) + +has_in6_is_addr_unspecified=1 +if test "x$ac_cv_macro_in6_is_addr_unspecified" = "xno"; then + has_in6_is_addr_unspecified=0 +fi +AC_SUBST(has_in6_is_addr_unspecified) +]) diff --git a/build/pcre.m4 b/build/pcre.m4 new file mode 100644 index 00000000..ba7ae3a4 --- /dev/null +++ b/build/pcre.m4 @@ -0,0 +1,114 @@ +dnl -------------------------------------------------------- -*- autoconf -*- +dnl Licensed to the Apache Software Foundation (ASF) under one or more +dnl contributor license agreements. See the NOTICE file distributed with +dnl this work for additional information regarding copyright ownership. +dnl The ASF licenses this file to You under the Apache License, Version 2.0 +dnl (the "License"); you may not use this file except in compliance with +dnl the License. You may obtain a copy of the License at +dnl +dnl http://www.apache.org/licenses/LICENSE-2.0 +dnl +dnl Unless required by applicable law or agreed to in writing, software +dnl distributed under the License is distributed on an "AS IS" BASIS, +dnl WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +dnl See the License for the specific language governing permissions and +dnl limitations under the License. 
+ +dnl +dnl pcre.m4: Trafficserver's pcre autoconf macros +dnl + +dnl +dnl TS_CHECK_PCRE: look for pcre libraries and headers +dnl +AC_DEFUN([TS_CHECK_PCRE], [ +enable_pcre=no +AC_ARG_WITH(pcre, [AC_HELP_STRING([--with-pcre=DIR],[use a specific pcre library])], +[ + if test "x$withval" != "xyes" && test "x$withval" != "x"; then + pcre_base_dir="$withval" + if test "$withval" != "no"; then + enable_pcre=yes + case "$withval" in + *":"*) + pcre_include="`echo $withval |sed -e 's/:.*$//'`" + pcre_ldflags="`echo $withval |sed -e 's/^.*://'`" + AC_MSG_CHECKING(checking for pcre includes in $pcre_include libs in $pcre_ldflags ) + ;; + *) + pcre_include="$withval/include" + pcre_ldflags="$withval/lib" + AC_MSG_CHECKING(checking for pcre includes in $withval) + ;; + esac + fi + fi +], +[ + AC_CHECK_PROG(PCRE_CONFIG, pcre-config, pcre-config) + if test "x$PCRE_CONFIG" != "x"; then + enable_pcre=yes + pcre_base_dir="`$PCRE_CONFIG --prefix`" + pcre_include="`$PCRE_CONFIG --cflags | sed -es/-I//`" + pcre_ldflags="`$PCRE_CONFIG --libs | sed -es/-lpcre// -es/-L//`" + fi +]) + +if test "x$pcre_base_dir" = "x"; then + AC_MSG_CHECKING([for pcre location]) + AC_CACHE_VAL(ats_cv_pcre_dir,[ + for dir in /usr/local /usr ; do + if test -d $dir && ( test -f $dir/include/pcre.h || test -f $dir/include/pcre/pcre.h ); then + ats_cv_pcre_dir=$dir + break + fi + done + ]) + pcre_base_dir=$ats_cv_pcre_dir + if test "x$pcre_base_dir" = "x"; then + enable_pcre=no + AC_MSG_RESULT([not found]) + else + enable_pcre=yes + pcre_include="$pcre_base_dir/include" + pcre_ldflags="$pcre_base_dir/lib" + AC_MSG_RESULT([$pcre_base_dir]) + fi +else + AC_MSG_CHECKING(for pcre headers in $pcre_include) + if test -d $pcre_include && test -d $pcre_ldflags && ( test -f $pcre_include/pcre.h || test -f $pcre_include/pcre/pcre.h ); then + AC_MSG_RESULT([ok]) + else + AC_MSG_RESULT([not found]) + fi +fi + +pcreh=0 +pcre_pcreh=0 +if test "$enable_pcre" != "no"; then + saved_ldflags=$LDFLAGS + saved_cppflags=$CPPFLAGS + 
pcre_have_headers=0 + pcre_have_libs=0 + if test "$pcre_base_dir" != "/usr"; then + TS_ADDTO(CPPFLAGS, [-I${pcre_include}]) + TS_ADDTO(LDFLAGS, [-L${pcre_ldflags}]) + TS_ADDTO(LIBTOOL_LINK_FLAGS, [-R${pcre_ldflags}]) + fi + AC_SEARCH_LIBS([pcre_exec], [pcre], [pcre_have_libs=1]) + if test "$pcre_have_libs" != "0"; then + TS_FLAG_HEADERS(pcre.h, [pcre_have_headers=1]) + TS_FLAG_HEADERS(pcre/pcre.h, [pcre_have_headers=1]) + fi + if test "$pcre_have_headers" != "0"; then + AC_DEFINE(HAVE_LIBPCRE,1,[Compiling with pcre support]) + AC_SUBST(LIBPCRE, [-lpcre]) + else + enable_pcre=no + CPPFLAGS=$saved_cppflags + LDFLAGS=$saved_ldflags + fi +fi +AC_SUBST(pcreh) +AC_SUBST(pcre_pcreh) +]) diff --git a/build/pkg.m4 b/build/pkg.m4 new file mode 100644 index 00000000..f26f84c9 --- /dev/null +++ b/build/pkg.m4 @@ -0,0 +1,199 @@ +# pkg.m4 - Macros to locate and utilise pkg-config. -*- Autoconf -*- +# serial 1 (pkg-config-0.24) +# +# Copyright © 2004 Scott James Remnant . +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +# +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that program. 
+ +# PKG_PROG_PKG_CONFIG([MIN-VERSION]) +# ---------------------------------- +AC_DEFUN([PKG_PROG_PKG_CONFIG], +[m4_pattern_forbid([^_?PKG_[A-Z_]+$]) +m4_pattern_allow([^PKG_CONFIG(_(PATH|LIBDIR|SYSROOT_DIR|ALLOW_SYSTEM_(CFLAGS|LIBS)))?$]) +m4_pattern_allow([^PKG_CONFIG_(DISABLE_UNINSTALLED|TOP_BUILD_DIR|DEBUG_SPEW)$]) +AC_ARG_VAR([PKG_CONFIG], [path to pkg-config utility]) +AC_ARG_VAR([PKG_CONFIG_PATH], [directories to add to pkg-config's search path]) +AC_ARG_VAR([PKG_CONFIG_LIBDIR], [path overriding pkg-config's built-in search path]) + +if test "x$ac_cv_env_PKG_CONFIG_set" != "xset"; then + AC_PATH_TOOL([PKG_CONFIG], [pkg-config]) +fi +if test -n "$PKG_CONFIG"; then + _pkg_min_version=m4_default([$1], [0.9.0]) + AC_MSG_CHECKING([pkg-config is at least version $_pkg_min_version]) + if $PKG_CONFIG --atleast-pkgconfig-version $_pkg_min_version; then + AC_MSG_RESULT([yes]) + else + AC_MSG_RESULT([no]) + PKG_CONFIG="" + fi +fi[]dnl +])# PKG_PROG_PKG_CONFIG + +# PKG_CHECK_EXISTS(MODULES, [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND]) +# +# Check to see whether a particular set of modules exists. Similar +# to PKG_CHECK_MODULES(), but does not set variables or print errors. 
+# +# Please remember that m4 expands AC_REQUIRE([PKG_PROG_PKG_CONFIG]) +# only at the first occurrence in configure.ac, so if the first place +# it's called might be skipped (such as if it is within an "if"), you +# have to call PKG_CHECK_EXISTS manually +# -------------------------------------------------------------- +AC_DEFUN([PKG_CHECK_EXISTS], +[AC_REQUIRE([PKG_PROG_PKG_CONFIG])dnl +if test -n "$PKG_CONFIG" && \ + AC_RUN_LOG([$PKG_CONFIG --exists --print-errors "$1"]); then + m4_default([$2], [:]) +m4_ifvaln([$3], [else + $3])dnl +fi]) + +# _PKG_CONFIG([VARIABLE], [COMMAND], [MODULES]) +# --------------------------------------------- +m4_define([_PKG_CONFIG], +[if test -n "$$1"; then + pkg_cv_[]$1="$$1" + elif test -n "$PKG_CONFIG"; then + PKG_CHECK_EXISTS([$3], + [pkg_cv_[]$1=`$PKG_CONFIG --[]$2 "$3" 2>/dev/null` + test "x$?" != "x0" && pkg_failed=yes ], + [pkg_failed=yes]) + else + pkg_failed=untried +fi[]dnl +])# _PKG_CONFIG + +# _PKG_SHORT_ERRORS_SUPPORTED +# ----------------------------- +AC_DEFUN([_PKG_SHORT_ERRORS_SUPPORTED], +[AC_REQUIRE([PKG_PROG_PKG_CONFIG]) +if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then + _pkg_short_errors_supported=yes +else + _pkg_short_errors_supported=no +fi[]dnl +])# _PKG_SHORT_ERRORS_SUPPORTED + + +# PKG_CHECK_MODULES(VARIABLE-PREFIX, MODULES, [ACTION-IF-FOUND], +# [ACTION-IF-NOT-FOUND]) +# +# +# Note that if there is a possibility the first call to +# PKG_CHECK_MODULES might not happen, you should be sure to include an +# explicit call to PKG_PROG_PKG_CONFIG in your configure.ac +# +# +# -------------------------------------------------------------- +AC_DEFUN([PKG_CHECK_MODULES], +[AC_REQUIRE([PKG_PROG_PKG_CONFIG])dnl +AC_ARG_VAR([$1][_CFLAGS], [C compiler flags for $1, overriding pkg-config])dnl +AC_ARG_VAR([$1][_LIBS], [linker flags for $1, overriding pkg-config])dnl + +pkg_failed=no +AC_MSG_CHECKING([for $1]) + +_PKG_CONFIG([$1][_CFLAGS], [cflags], [$2]) +_PKG_CONFIG([$1][_LIBS], [libs], [$2]) + 
+m4_define([_PKG_TEXT], [Alternatively, you may set the environment variables $1[]_CFLAGS +and $1[]_LIBS to avoid the need to call pkg-config. +See the pkg-config man page for more details.]) + +if test $pkg_failed = yes; then + AC_MSG_RESULT([no]) + _PKG_SHORT_ERRORS_SUPPORTED + if test $_pkg_short_errors_supported = yes; then + $1[]_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "$2" 2>&1` + else + $1[]_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "$2" 2>&1` + fi + # Put the nasty error message in config.log where it belongs + echo "$$1[]_PKG_ERRORS" >&AS_MESSAGE_LOG_FD + + m4_default([$4], [AC_MSG_ERROR( +[Package requirements ($2) were not met: + +$$1_PKG_ERRORS + +Consider adjusting the PKG_CONFIG_PATH environment variable if you +installed software in a non-standard prefix. + +_PKG_TEXT])[]dnl + ]) +elif test $pkg_failed = untried; then + AC_MSG_RESULT([no]) + m4_default([$4], [AC_MSG_FAILURE( +[The pkg-config script could not be found or is too old. Make sure it +is in your PATH or set the PKG_CONFIG environment variable to the full +path to pkg-config. + +_PKG_TEXT + +To get pkg-config, see <http://pkg-config.freedesktop.org/>.])[]dnl + ]) +else + $1[]_CFLAGS=$pkg_cv_[]$1[]_CFLAGS + $1[]_LIBS=$pkg_cv_[]$1[]_LIBS + AC_MSG_RESULT([yes]) + $3 +fi[]dnl +])# PKG_CHECK_MODULES + + +# PKG_INSTALLDIR(DIRECTORY) +# ------------------------- +# Substitutes the variable pkgconfigdir as the location where a module +# should install pkg-config .pc files. By default the directory is +# $libdir/pkgconfig, but the default can be changed by passing +# DIRECTORY. The user can override through the --with-pkgconfigdir +# parameter. 
+AC_DEFUN([PKG_INSTALLDIR], +[m4_pushdef([pkg_default], [m4_default([$1], ['${libdir}/pkgconfig'])]) +m4_pushdef([pkg_description], + [pkg-config installation directory @<:@]pkg_default[@:>@]) +AC_ARG_WITH([pkgconfigdir], + [AS_HELP_STRING([--with-pkgconfigdir], pkg_description)],, + [with_pkgconfigdir=]pkg_default) +AC_SUBST([pkgconfigdir], [$with_pkgconfigdir]) +m4_popdef([pkg_default]) +m4_popdef([pkg_description]) +]) dnl PKG_INSTALLDIR + + +# PKG_NOARCH_INSTALLDIR(DIRECTORY) +# ------------------------- +# Substitutes the variable noarch_pkgconfigdir as the location where a +# module should install arch-independent pkg-config .pc files. By +# default the directory is $datadir/pkgconfig, but the default can be +# changed by passing DIRECTORY. The user can override through the +# --with-noarch-pkgconfigdir parameter. +AC_DEFUN([PKG_NOARCH_INSTALLDIR], +[m4_pushdef([pkg_default], [m4_default([$1], ['${datadir}/pkgconfig'])]) +m4_pushdef([pkg_description], + [pkg-config arch-independent installation directory @<:@]pkg_default[@:>@]) +AC_ARG_WITH([noarch-pkgconfigdir], + [AS_HELP_STRING([--with-noarch-pkgconfigdir], pkg_description)],, + [with_noarch_pkgconfigdir=]pkg_default) +AC_SUBST([noarch_pkgconfigdir], [$with_noarch_pkgconfigdir]) +m4_popdef([pkg_default]) +m4_popdef([pkg_description]) +]) dnl PKG_NOARCH_INSTALLDIR diff --git a/build/plugins.mk b/build/plugins.mk new file mode 100644 index 00000000..c2844059 --- /dev/null +++ b/build/plugins.mk @@ -0,0 +1,37 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# plugins.mk: Common automake build variables for Traffic Server plugins. + +TS_PLUGIN_LDFLAGS = \ + -module \ + -shared \ + -avoid-version + +TS_PLUGIN_CPPFLAGS = \ + -D__STDC_LIMIT_MACROS=1 \ + -D__STDC_FORMAT_MACROS=1 \ + -I$(top_builddir)/proxy/api \ + -I$(top_srcdir)/proxy/api \ + -I$(top_builddir)/lib/ts \ + -I$(top_srcdir)/lib/ts + +# Provide a default AM_CPPFLAGS. Automake handles this correctly, but libtool +# throws an error if we try to do the same with AM_LDFLAGS. Hence, we provide +# TS_PLUGIN variables that can be used to construct the necessary automake +# syntax. +AM_CPPFLAGS = $(TS_PLUGIN_CPPFLAGS) +pkglibdir = $(pkglibexecdir) diff --git a/build/tcl.m4 b/build/tcl.m4 new file mode 100644 index 00000000..5113cd86 --- /dev/null +++ b/build/tcl.m4 @@ -0,0 +1,3306 @@ +#------------------------------------------------------------------------ +# SC_PATH_TCLCONFIG -- +# +# Locate the tclConfig.sh file and perform a sanity check on +# the Tcl compile flags +# +# Arguments: +# none +# +# Results: +# +# Adds the following arguments to configure: +# --with-tcl=... +# +# Defines the following vars: +# TCL_BIN_DIR Full path to the directory containing +# the tclConfig.sh file +#------------------------------------------------------------------------ + +AC_DEFUN([SC_PATH_TCLCONFIG], [ + # + # Ok, lets find the tcl configuration + # First, look for one uninstalled. 
+ # the alternative search directory is invoked by --with-tcl + # + + if test x"${no_tcl}" = x ; then + # we reset no_tcl in case something fails here + no_tcl=true + AC_ARG_WITH(tcl, + AC_HELP_STRING([--with-tcl], + [directory containing tcl configuration (tclConfig.sh)]), + with_tclconfig="${withval}") + AC_MSG_CHECKING([for Tcl configuration]) + AC_CACHE_VAL(ac_cv_c_tclconfig,[ + + # First check to see if --with-tcl was specified. + if test x"${with_tclconfig}" != x ; then + case "${with_tclconfig}" in + */tclConfig.sh ) + if test -f "${with_tclconfig}"; then + AC_MSG_WARN([--with-tcl argument should refer to directory containing tclConfig.sh, not to tclConfig.sh itself]) + with_tclconfig="`echo "${with_tclconfig}" | sed 's!/tclConfig\.sh$!!'`" + fi ;; + esac + if test -f "${with_tclconfig}/tclConfig.sh" ; then + ac_cv_c_tclconfig="`(cd "${with_tclconfig}"; pwd)`" + else + AC_MSG_ERROR([${with_tclconfig} directory doesn't contain tclConfig.sh]) + fi + fi + + # then check for a private Tcl installation + if test x"${ac_cv_c_tclconfig}" = x ; then + for i in \ + ../tcl \ + `ls -dr ../tcl[[8-9]].[[0-9]].[[0-9]]* 2>/dev/null` \ + `ls -dr ../tcl[[8-9]].[[0-9]] 2>/dev/null` \ + `ls -dr ../tcl[[8-9]].[[0-9]]* 2>/dev/null` \ + ../../tcl \ + `ls -dr ../../tcl[[8-9]].[[0-9]].[[0-9]]* 2>/dev/null` \ + `ls -dr ../../tcl[[8-9]].[[0-9]] 2>/dev/null` \ + `ls -dr ../../tcl[[8-9]].[[0-9]]* 2>/dev/null` \ + ../../../tcl \ + `ls -dr ../../../tcl[[8-9]].[[0-9]].[[0-9]]* 2>/dev/null` \ + `ls -dr ../../../tcl[[8-9]].[[0-9]] 2>/dev/null` \ + `ls -dr ../../../tcl[[8-9]].[[0-9]]* 2>/dev/null` ; do + if test -f "$i/unix/tclConfig.sh" ; then + ac_cv_c_tclconfig="`(cd $i/unix; pwd)`" + break + fi + done + fi + + # on Darwin, check in Framework installation locations + if test "`uname -s`" = "Darwin" -a x"${ac_cv_c_tclconfig}" = x ; then + for i in "`xcrun --show-sdk-path 2>/dev/null`/usr/lib" \ + `ls -d ~/Library/Frameworks 2>/dev/null` \ + `ls -d /Library/Frameworks 2>/dev/null` \ + `ls 
-d /Network/Library/Frameworks 2>/dev/null` \ + `ls -d /System/Library/Frameworks 2>/dev/null` \ + ; do + if test -f "$i/tclConfig.sh" ; then + ac_cv_c_tclconfig="`(cd $i; pwd)`" + break + elif test -f "$i/Tcl.framework/tclConfig.sh" ; then + ac_cv_c_tclconfig="`(cd $i/Tcl.framework; pwd)`" + break + fi + done + fi + + # check in a few common install locations + if test x"${ac_cv_c_tclconfig}" = x ; then + for i in `ls -d ${libdir} 2>/dev/null` \ + `ls -d ${exec_prefix}/lib 2>/dev/null` \ + `ls -dr ${exec_prefix}/lib/tcl[[8-9]].[[0-9]].[[0-9]]* 2>/dev/null` \ + `ls -dr ${exec_prefix}/lib/tcl[[8-9]].[[0-9]] 2>/dev/null` \ + `ls -dr ${exec_prefix}/lib/tcl[[8-9]].[[0-9]]* 2>/dev/null` \ + `ls -d ${prefix}/lib 2>/dev/null` \ + `ls -dr ${prefix}/lib/tcl[[8-9]].[[0-9]].[[0-9]]* 2>/dev/null` \ + `ls -dr ${prefix}/lib/tcl[[8-9]].[[0-9]] 2>/dev/null` \ + `ls -dr ${prefix}/lib/tcl[[8-9]].[[0-9]]* 2>/dev/null` \ + `ls -d /usr/local/lib 2>/dev/null` \ + `ls -dr /usr/local/lib/tcl[[8-9]].[[0-9]].[[0-9]]* 2>/dev/null` \ + `ls -dr /usr/local/lib/tcl[[8-9]].[[0-9]] 2>/dev/null` \ + `ls -dr /usr/local/lib/tcl[[8-9]].[[0-9]]* 2>/dev/null` \ + `ls -d /usr/lib64 2>/dev/null` \ + `ls -dr /usr/lib64/tcl[[8-9]].[[0-9]].[[0-9]]* 2>/dev/null` \ + `ls -dr /usr/lib64/tcl[[8-9]].[[0-9]] 2>/dev/null` \ + `ls -dr /usr/lib64/tcl[[8-9]].[[0-9]]* 2>/dev/null` \ + `ls -d /usr/contrib/lib 2>/dev/null` \ + `ls -dr /usr/contrib/lib/tcl[[8-9]].[[0-9]].[[0-9]]* 2>/dev/null` \ + `ls -dr /usr/contrib/lib/tcl[[8-9]].[[0-9]] 2>/dev/null` \ + `ls -dr /usr/contrib/lib/tcl[[8-9]].[[0-9]]* 2>/dev/null` \ + `ls -d /usr/lib 2>/dev/null` \ + `ls -dr /usr/lib/tcl[[8-9]].[[0-9]].[[0-9]]* 2>/dev/null` \ + `ls -dr /usr/lib/tcl[[8-9]].[[0-9]] 2>/dev/null` \ + `ls -dr /usr/lib/tcl[[8-9]].[[0-9]]* 2>/dev/null` \ + ; do + if test -f "$i/tclConfig.sh" ; then + ac_cv_c_tclconfig="`(cd $i; pwd)`" + break + fi + done + fi + + # check in a few other private locations + if test x"${ac_cv_c_tclconfig}" = x ; then + for i in \ + 
${srcdir}/../tcl \ + `ls -dr ${srcdir}/../tcl[[8-9]].[[0-9]].[[0-9]]* 2>/dev/null` \ + `ls -dr ${srcdir}/../tcl[[8-9]].[[0-9]] 2>/dev/null` \ + `ls -dr ${srcdir}/../tcl[[8-9]].[[0-9]]* 2>/dev/null` ; do + if test -f "$i/unix/tclConfig.sh" ; then + ac_cv_c_tclconfig="`(cd $i/unix; pwd)`" + break + fi + done + fi + ]) + + if test x"${ac_cv_c_tclconfig}" = x ; then + TCL_BIN_DIR="# no Tcl configs found" + AC_MSG_ERROR([Can't find Tcl configuration, install the TCL dev package]) + else + no_tcl= + TCL_BIN_DIR="${ac_cv_c_tclconfig}" + AC_MSG_RESULT([found ${TCL_BIN_DIR}/tclConfig.sh]) + fi + fi +]) + +#------------------------------------------------------------------------ +# SC_PATH_TKCONFIG -- +# +# Locate the tkConfig.sh file +# +# Arguments: +# none +# +# Results: +# +# Adds the following arguments to configure: +# --with-tk=... +# +# Defines the following vars: +# TK_BIN_DIR Full path to the directory containing +# the tkConfig.sh file +#------------------------------------------------------------------------ + +AC_DEFUN([SC_PATH_TKCONFIG], [ + # + # Ok, lets find the tk configuration + # First, look for one uninstalled. + # the alternative search directory is invoked by --with-tk + # + + if test x"${no_tk}" = x ; then + # we reset no_tk in case something fails here + no_tk=true + AC_ARG_WITH(tk, + AC_HELP_STRING([--with-tk], + [directory containing tk configuration (tkConfig.sh)]), + with_tkconfig="${withval}") + AC_MSG_CHECKING([for Tk configuration]) + AC_CACHE_VAL(ac_cv_c_tkconfig,[ + + # First check to see if --with-tkconfig was specified. 
+ if test x"${with_tkconfig}" != x ; then + case "${with_tkconfig}" in + */tkConfig.sh ) + if test -f "${with_tkconfig}"; then + AC_MSG_WARN([--with-tk argument should refer to directory containing tkConfig.sh, not to tkConfig.sh itself]) + with_tkconfig="`echo "${with_tkconfig}" | sed 's!/tkConfig\.sh$!!'`" + fi ;; + esac + if test -f "${with_tkconfig}/tkConfig.sh" ; then + ac_cv_c_tkconfig="`(cd "${with_tkconfig}"; pwd)`" + else + AC_MSG_ERROR([${with_tkconfig} directory doesn't contain tkConfig.sh]) + fi + fi + + # then check for a private Tk library + if test x"${ac_cv_c_tkconfig}" = x ; then + for i in \ + ../tk \ + `ls -dr ../tk[[8-9]].[[0-9]].[[0-9]]* 2>/dev/null` \ + `ls -dr ../tk[[8-9]].[[0-9]] 2>/dev/null` \ + `ls -dr ../tk[[8-9]].[[0-9]]* 2>/dev/null` \ + ../../tk \ + `ls -dr ../../tk[[8-9]].[[0-9]].[[0-9]]* 2>/dev/null` \ + `ls -dr ../../tk[[8-9]].[[0-9]] 2>/dev/null` \ + `ls -dr ../../tk[[8-9]].[[0-9]]* 2>/dev/null` \ + ../../../tk \ + `ls -dr ../../../tk[[8-9]].[[0-9]].[[0-9]]* 2>/dev/null` \ + `ls -dr ../../../tk[[8-9]].[[0-9]] 2>/dev/null` \ + `ls -dr ../../../tk[[8-9]].[[0-9]]* 2>/dev/null` ; do + if test -f "$i/unix/tkConfig.sh" ; then + ac_cv_c_tkconfig="`(cd $i/unix; pwd)`" + break + fi + done + fi + + # on Darwin, check in Framework installation locations + if test "`uname -s`" = "Darwin" -a x"${ac_cv_c_tkconfig}" = x ; then + for i in `ls -d ~/Library/Frameworks 2>/dev/null` \ + `ls -d /Library/Frameworks 2>/dev/null` \ + `ls -d /Network/Library/Frameworks 2>/dev/null` \ + `ls -d /System/Library/Frameworks 2>/dev/null` \ + ; do + if test -f "$i/Tk.framework/tkConfig.sh" ; then + ac_cv_c_tkconfig="`(cd $i/Tk.framework; pwd)`" + break + fi + done + fi + + # check in a few common install locations + if test x"${ac_cv_c_tkconfig}" = x ; then + for i in `ls -d ${libdir} 2>/dev/null` \ + `ls -d ${exec_prefix}/lib 2>/dev/null` \ + `ls -d ${prefix}/lib 2>/dev/null` \ + `ls -d /usr/local/lib 2>/dev/null` \ + `ls -d /usr/contrib/lib 2>/dev/null` \ + `ls 
-d /usr/lib 2>/dev/null` \ + ; do + if test -f "$i/tkConfig.sh" ; then + ac_cv_c_tkconfig="`(cd $i; pwd)`" + break + fi + done + fi + + # check in a few other private locations + if test x"${ac_cv_c_tkconfig}" = x ; then + for i in \ + ${srcdir}/../tk \ + `ls -dr ${srcdir}/../tk[[8-9]].[[0-9]].[[0-9]]* 2>/dev/null` \ + `ls -dr ${srcdir}/../tk[[8-9]].[[0-9]] 2>/dev/null` \ + `ls -dr ${srcdir}/../tk[[8-9]].[[0-9]]* 2>/dev/null` ; do + if test -f "$i/unix/tkConfig.sh" ; then + ac_cv_c_tkconfig="`(cd $i/unix; pwd)`" + break + fi + done + fi + ]) + + if test x"${ac_cv_c_tkconfig}" = x ; then + TK_BIN_DIR="# no Tk configs found" + AC_MSG_WARN([Can't find Tk configuration definitions]) + exit 0 + else + no_tk= + TK_BIN_DIR="${ac_cv_c_tkconfig}" + AC_MSG_RESULT([found ${TK_BIN_DIR}/tkConfig.sh]) + fi + fi +]) + +#------------------------------------------------------------------------ +# SC_LOAD_TCLCONFIG -- +# +# Load the tclConfig.sh file +# +# Arguments: +# +# Requires the following vars to be set: +# TCL_BIN_DIR +# +# Results: +# +# Subst the following vars: +# TCL_BIN_DIR +# TCL_SRC_DIR +# TCL_LIB_FILE +# +#------------------------------------------------------------------------ + +AC_DEFUN([SC_LOAD_TCLCONFIG], [ + AC_MSG_CHECKING([for existence of ${TCL_BIN_DIR}/tclConfig.sh]) + + if test -f "${TCL_BIN_DIR}/tclConfig.sh" ; then + AC_MSG_RESULT([loading]) + . "${TCL_BIN_DIR}/tclConfig.sh" + else + AC_MSG_RESULT([could not find ${TCL_BIN_DIR}/tclConfig.sh]) + fi + + # eval is required to do the TCL_DBGX substitution + eval "TCL_LIB_FILE=\"${TCL_LIB_FILE}\"" + eval "TCL_STUB_LIB_FILE=\"${TCL_STUB_LIB_FILE}\"" + + # If the TCL_BIN_DIR is the build directory (not the install directory), + # then set the common variable name to the value of the build variables. + # For example, the variable TCL_LIB_SPEC will be set to the value + # of TCL_BUILD_LIB_SPEC. 
An extension should make use of TCL_LIB_SPEC + # instead of TCL_BUILD_LIB_SPEC since it will work with both an + # installed and uninstalled version of Tcl. + if test -f "${TCL_BIN_DIR}/Makefile" ; then + TCL_LIB_SPEC="${TCL_BUILD_LIB_SPEC}" + TCL_STUB_LIB_SPEC="${TCL_BUILD_STUB_LIB_SPEC}" + TCL_STUB_LIB_PATH="${TCL_BUILD_STUB_LIB_PATH}" + elif test "`uname -s`" = "Darwin"; then + # If Tcl was built as a framework, attempt to use the libraries + # from the framework at the given location so that linking works + # against Tcl.framework installed in an arbitary location. + case ${TCL_DEFS} in + *TCL_FRAMEWORK*) + if test -f "${TCL_BIN_DIR}/${TCL_LIB_FILE}"; then + for i in "`cd "${TCL_BIN_DIR}"; pwd`" \ + "`cd "${TCL_BIN_DIR}"/../..; pwd`"; do + if test "`basename "$i"`" = "${TCL_LIB_FILE}.framework"; then + TCL_LIB_SPEC="-F`dirname "$i" | sed -e 's/ /\\\\ /g'` -framework ${TCL_LIB_FILE}" + break + fi + done + fi + if test -f "${TCL_BIN_DIR}/${TCL_STUB_LIB_FILE}"; then + TCL_STUB_LIB_SPEC="-L`echo "${TCL_BIN_DIR}" | sed -e 's/ /\\\\ /g'` ${TCL_STUB_LIB_FLAG}" + TCL_STUB_LIB_PATH="${TCL_BIN_DIR}/${TCL_STUB_LIB_FILE}" + fi + ;; + esac + fi + + # eval is required to do the TCL_DBGX substitution + eval "TCL_LIB_FLAG=\"${TCL_LIB_FLAG}\"" + eval "TCL_LIB_SPEC=\"${TCL_LIB_SPEC}\"" + eval "TCL_STUB_LIB_FLAG=\"${TCL_STUB_LIB_FLAG}\"" + eval "TCL_STUB_LIB_SPEC=\"${TCL_STUB_LIB_SPEC}\"" + + AC_SUBST(TCL_VERSION) + AC_SUBST(TCL_PATCH_LEVEL) + AC_SUBST(TCL_BIN_DIR) + AC_SUBST(TCL_SRC_DIR) + + AC_SUBST(TCL_LIB_FILE) + AC_SUBST(TCL_LIB_FLAG) + AC_SUBST(TCL_LIB_SPEC) + + AC_SUBST(TCL_STUB_LIB_FILE) + AC_SUBST(TCL_STUB_LIB_FLAG) + AC_SUBST(TCL_STUB_LIB_SPEC) +]) + +#------------------------------------------------------------------------ +# SC_LOAD_TKCONFIG -- +# +# Load the tkConfig.sh file +# +# Arguments: +# +# Requires the following vars to be set: +# TK_BIN_DIR +# +# Results: +# +# Sets the following vars that should be in tkConfig.sh: +# TK_BIN_DIR 
+#------------------------------------------------------------------------ + +AC_DEFUN([SC_LOAD_TKCONFIG], [ + AC_MSG_CHECKING([for existence of ${TK_BIN_DIR}/tkConfig.sh]) + + if test -f "${TK_BIN_DIR}/tkConfig.sh" ; then + AC_MSG_RESULT([loading]) + . "${TK_BIN_DIR}/tkConfig.sh" + else + AC_MSG_RESULT([could not find ${TK_BIN_DIR}/tkConfig.sh]) + fi + + # eval is required to do the TK_DBGX substitution + eval "TK_LIB_FILE=\"${TK_LIB_FILE}\"" + eval "TK_STUB_LIB_FILE=\"${TK_STUB_LIB_FILE}\"" + + # If the TK_BIN_DIR is the build directory (not the install directory), + # then set the common variable name to the value of the build variables. + # For example, the variable TK_LIB_SPEC will be set to the value + # of TK_BUILD_LIB_SPEC. An extension should make use of TK_LIB_SPEC + # instead of TK_BUILD_LIB_SPEC since it will work with both an + # installed and uninstalled version of Tcl. + if test -f "${TK_BIN_DIR}/Makefile" ; then + TK_LIB_SPEC="${TK_BUILD_LIB_SPEC}" + TK_STUB_LIB_SPEC="${TK_BUILD_STUB_LIB_SPEC}" + TK_STUB_LIB_PATH="${TK_BUILD_STUB_LIB_PATH}" + elif test "`uname -s`" = "Darwin"; then + # If Tk was built as a framework, attempt to use the libraries + # from the framework at the given location so that linking works + # against Tk.framework installed in an arbitary location. 
+ case ${TK_DEFS} in + *TK_FRAMEWORK*) + if test -f "${TK_BIN_DIR}/${TK_LIB_FILE}"; then + for i in "`cd "${TK_BIN_DIR}"; pwd`" \ + "`cd "${TK_BIN_DIR}"/../..; pwd`"; do + if test "`basename "$i"`" = "${TK_LIB_FILE}.framework"; then + TK_LIB_SPEC="-F`dirname "$i" | sed -e 's/ /\\\\ /g'` -framework ${TK_LIB_FILE}" + break + fi + done + fi + if test -f "${TK_BIN_DIR}/${TK_STUB_LIB_FILE}"; then + TK_STUB_LIB_SPEC="-L` echo "${TK_BIN_DIR}" | sed -e 's/ /\\\\ /g'` ${TK_STUB_LIB_FLAG}" + TK_STUB_LIB_PATH="${TK_BIN_DIR}/${TK_STUB_LIB_FILE}" + fi + ;; + esac + fi + + # eval is required to do the TK_DBGX substitution + eval "TK_LIB_FLAG=\"${TK_LIB_FLAG}\"" + eval "TK_LIB_SPEC=\"${TK_LIB_SPEC}\"" + eval "TK_STUB_LIB_FLAG=\"${TK_STUB_LIB_FLAG}\"" + eval "TK_STUB_LIB_SPEC=\"${TK_STUB_LIB_SPEC}\"" + + AC_SUBST(TK_VERSION) + AC_SUBST(TK_BIN_DIR) + AC_SUBST(TK_SRC_DIR) + + AC_SUBST(TK_LIB_FILE) + AC_SUBST(TK_LIB_FLAG) + AC_SUBST(TK_LIB_SPEC) + + AC_SUBST(TK_STUB_LIB_FILE) + AC_SUBST(TK_STUB_LIB_FLAG) + AC_SUBST(TK_STUB_LIB_SPEC) +]) + +#------------------------------------------------------------------------ +# SC_PROG_TCLSH +# Locate a tclsh shell installed on the system path. This macro +# will only find a Tcl shell that already exists on the system. +# It will not find a Tcl shell in the Tcl build directory or +# a Tcl shell that has been installed from the Tcl build directory. +# If a Tcl shell can't be located on the PATH, then TCLSH_PROG will +# be set to "". Extensions should take care not to create Makefile +# rules that are run by default and depend on TCLSH_PROG. An +# extension can't assume that an executable Tcl shell exists at +# build time. 
+# +# Arguments +# none +# +# Results +# Subst's the following values: +# TCLSH_PROG +#------------------------------------------------------------------------ + +AC_DEFUN([SC_PROG_TCLSH], [ + AC_MSG_CHECKING([for tclsh]) + AC_CACHE_VAL(ac_cv_path_tclsh, [ + search_path=`echo ${PATH} | sed -e 's/:/ /g'` + for dir in $search_path ; do + for j in `ls -r $dir/tclsh[[8-9]]* 2> /dev/null` \ + `ls -r $dir/tclsh* 2> /dev/null` ; do + if test x"$ac_cv_path_tclsh" = x ; then + if test -f "$j" ; then + ac_cv_path_tclsh=$j + break + fi + fi + done + done + ]) + + if test -f "$ac_cv_path_tclsh" ; then + TCLSH_PROG="$ac_cv_path_tclsh" + AC_MSG_RESULT([$TCLSH_PROG]) + else + # It is not an error if an installed version of Tcl can't be located. + TCLSH_PROG="" + AC_MSG_RESULT([No tclsh found on PATH]) + fi + AC_SUBST(TCLSH_PROG) +]) + +#------------------------------------------------------------------------ +# SC_BUILD_TCLSH +# Determine the fully qualified path name of the tclsh executable +# in the Tcl build directory. This macro will correctly determine +# the name of the tclsh executable even if tclsh has not yet +# been built in the build directory. The build tclsh must be used +# when running tests from an extension build directory. It is not +# correct to use the TCLSH_PROG in cases like this. 
+# +# Arguments +# none +# +# Results +# Subst's the following values: +# BUILD_TCLSH +#------------------------------------------------------------------------ + +AC_DEFUN([SC_BUILD_TCLSH], [ + AC_MSG_CHECKING([for tclsh in Tcl build directory]) + BUILD_TCLSH="${TCL_BIN_DIR}"/tclsh + AC_MSG_RESULT([$BUILD_TCLSH]) + AC_SUBST(BUILD_TCLSH) +]) + +#------------------------------------------------------------------------ +# SC_ENABLE_SHARED -- +# +# Allows the building of shared libraries +# +# Arguments: +# none +# +# Results: +# +# Adds the following arguments to configure: +# --enable-shared=yes|no +# +# Defines the following vars: +# STATIC_BUILD Used for building import/export libraries +# on Windows. +# +# Sets the following vars: +# SHARED_BUILD Value of 1 or 0 +#------------------------------------------------------------------------ + +AC_DEFUN([SC_ENABLE_SHARED], [ + AC_MSG_CHECKING([how to build libraries]) + AC_ARG_ENABLE(shared, + AC_HELP_STRING([--enable-shared], + [build and link with shared libraries (default: on)]), + [tcl_ok=$enableval], [tcl_ok=yes]) + + if test "${enable_shared+set}" = set; then + enableval="$enable_shared" + tcl_ok=$enableval + else + tcl_ok=yes + fi + + if test "$tcl_ok" = "yes" ; then + AC_MSG_RESULT([shared]) + SHARED_BUILD=1 + else + AC_MSG_RESULT([static]) + SHARED_BUILD=0 + AC_DEFINE(STATIC_BUILD, 1, [Is this a static build?]) + fi +]) + +#------------------------------------------------------------------------ +# SC_ENABLE_FRAMEWORK -- +# +# Allows the building of shared libraries into frameworks +# +# Arguments: +# none +# +# Results: +# +# Adds the following arguments to configure: +# --enable-framework=yes|no +# +# Sets the following vars: +# FRAMEWORK_BUILD Value of 1 or 0 +#------------------------------------------------------------------------ + +AC_DEFUN([SC_ENABLE_FRAMEWORK], [ + if test "`uname -s`" = "Darwin" ; then + AC_MSG_CHECKING([how to package libraries]) + AC_ARG_ENABLE(framework, + 
AC_HELP_STRING([--enable-framework], + [package shared libraries in MacOSX frameworks (default: off)]), + [enable_framework=$enableval], [enable_framework=no]) + if test $enable_framework = yes; then + if test $SHARED_BUILD = 0; then + AC_MSG_WARN([Frameworks can only be built if --enable-shared is yes]) + enable_framework=no + fi + if test $tcl_corefoundation = no; then + AC_MSG_WARN([Frameworks can only be used when CoreFoundation is available]) + enable_framework=no + fi + fi + if test $enable_framework = yes; then + AC_MSG_RESULT([framework]) + FRAMEWORK_BUILD=1 + else + if test $SHARED_BUILD = 1; then + AC_MSG_RESULT([shared library]) + else + AC_MSG_RESULT([static library]) + fi + FRAMEWORK_BUILD=0 + fi + fi +]) + +#------------------------------------------------------------------------ +# SC_ENABLE_THREADS -- +# +# Specify if thread support should be enabled +# +# Arguments: +# none +# +# Results: +# +# Adds the following arguments to configure: +# --enable-threads +# +# Sets the following vars: +# THREADS_LIBS Thread library(s) +# +# Defines the following vars: +# TCL_THREADS +# _REENTRANT +# _THREAD_SAFE +# +#------------------------------------------------------------------------ + +AC_DEFUN([SC_ENABLE_THREADS], [ + AC_ARG_ENABLE(threads, + AC_HELP_STRING([--enable-threads], + [build with threads (default: off)]), + [tcl_ok=$enableval], [tcl_ok=no]) + + if test "${TCL_THREADS}" = 1; then + tcl_threaded_core=1; + fi + + if test "$tcl_ok" = "yes" -o "${TCL_THREADS}" = 1; then + TCL_THREADS=1 + # USE_THREAD_ALLOC tells us to try the special thread-based + # allocator that significantly reduces lock contention + AC_DEFINE(USE_THREAD_ALLOC, 1, + [Do we want to use the threaded memory allocator?]) + AC_DEFINE(_REENTRANT, 1, [Do we want the reentrant OS API?]) + if test "`uname -s`" = "SunOS" ; then + AC_DEFINE(_POSIX_PTHREAD_SEMANTICS, 1, + [Do we really want to follow the standard? 
Yes we do!]) + fi + AC_DEFINE(_THREAD_SAFE, 1, [Do we want the thread-safe OS API?]) + AC_CHECK_LIB(pthread,pthread_mutex_init,tcl_ok=yes,tcl_ok=no) + if test "$tcl_ok" = "no"; then + # Check a little harder for __pthread_mutex_init in the same + # library, as some systems hide it there until pthread.h is + # defined. We could alternatively do an AC_TRY_COMPILE with + # pthread.h, but that will work with libpthread really doesn't + # exist, like AIX 4.2. [Bug: 4359] + AC_CHECK_LIB(pthread, __pthread_mutex_init, + tcl_ok=yes, tcl_ok=no) + fi + + if test "$tcl_ok" = "yes"; then + # The space is needed + THREADS_LIBS=" -lpthread" + else + AC_CHECK_LIB(pthreads, pthread_mutex_init, + tcl_ok=yes, tcl_ok=no) + if test "$tcl_ok" = "yes"; then + # The space is needed + THREADS_LIBS=" -lpthreads" + else + AC_CHECK_LIB(c, pthread_mutex_init, + tcl_ok=yes, tcl_ok=no) + if test "$tcl_ok" = "no"; then + AC_CHECK_LIB(c_r, pthread_mutex_init, + tcl_ok=yes, tcl_ok=no) + if test "$tcl_ok" = "yes"; then + # The space is needed + THREADS_LIBS=" -pthread" + else + TCL_THREADS=0 + AC_MSG_WARN([Don't know how to find pthread lib on your system - you must disable thread support or edit the LIBS in the Makefile...]) + fi + fi + fi + fi + + # Does the pthread-implementation provide + # 'pthread_attr_setstacksize' ? 
+ + ac_saved_libs=$LIBS + LIBS="$LIBS $THREADS_LIBS" + AC_CHECK_FUNCS(pthread_attr_setstacksize) + AC_CHECK_FUNC(pthread_attr_get_np,tcl_ok=yes,tcl_ok=no) + if test $tcl_ok = yes ; then + AC_DEFINE(HAVE_PTHREAD_ATTR_GET_NP, 1, + [Do we want a BSD-like thread-attribute interface?]) + AC_CACHE_CHECK([for pthread_attr_get_np declaration], + tcl_cv_grep_pthread_attr_get_np, [ + AC_EGREP_HEADER(pthread_attr_get_np, pthread.h, + tcl_cv_grep_pthread_attr_get_np=present, + tcl_cv_grep_pthread_attr_get_np=missing)]) + if test $tcl_cv_grep_pthread_attr_get_np = missing ; then + AC_DEFINE(ATTRGETNP_NOT_DECLARED, 1, + [Is pthread_attr_get_np() declared in ?]) + fi + else + AC_CHECK_FUNC(pthread_getattr_np,tcl_ok=yes,tcl_ok=no) + if test $tcl_ok = yes ; then + AC_DEFINE(HAVE_PTHREAD_GETATTR_NP, 1, + [Do we want a Linux-like thread-attribute interface?]) + AC_CACHE_CHECK([for pthread_getattr_np declaration], + tcl_cv_grep_pthread_getattr_np, [ + AC_EGREP_HEADER(pthread_getattr_np, pthread.h, + tcl_cv_grep_pthread_getattr_np=present, + tcl_cv_grep_pthread_getattr_np=missing)]) + if test $tcl_cv_grep_pthread_getattr_np = missing ; then + AC_DEFINE(GETATTRNP_NOT_DECLARED, 1, + [Is pthread_getattr_np declared in ?]) + fi + fi + fi + if test $tcl_ok = no; then + # Darwin thread stacksize API + AC_CHECK_FUNCS(pthread_get_stacksize_np) + fi + LIBS=$ac_saved_libs + else + TCL_THREADS=0 + fi + # Do checking message here to not mess up interleaved configure output + AC_MSG_CHECKING([for building with threads]) + if test "${TCL_THREADS}" = 1; then + AC_DEFINE(TCL_THREADS, 1, [Are we building with threads enabled?]) + if test "${tcl_threaded_core}" = 1; then + AC_MSG_RESULT([yes (threaded core)]) + else + AC_MSG_RESULT([yes]) + fi + else + AC_MSG_RESULT([no (default)]) + fi + + AC_SUBST(TCL_THREADS) +]) + +#------------------------------------------------------------------------ +# SC_ENABLE_SYMBOLS -- +# +# Specify if debugging symbols should be used. 
+# Memory (TCL_MEM_DEBUG) and compile (TCL_COMPILE_DEBUG) debugging +# can also be enabled. +# +# Arguments: +# none +# +# Requires the following vars to be set in the Makefile: +# CFLAGS_DEBUG +# CFLAGS_OPTIMIZE +# LDFLAGS_DEBUG +# LDFLAGS_OPTIMIZE +# +# Results: +# +# Adds the following arguments to configure: +# --enable-symbols +# +# Defines the following vars: +# CFLAGS_DEFAULT Sets to $(CFLAGS_DEBUG) if true +# Sets to $(CFLAGS_OPTIMIZE) if false +# LDFLAGS_DEFAULT Sets to $(LDFLAGS_DEBUG) if true +# Sets to $(LDFLAGS_OPTIMIZE) if false +# DBGX Formerly used as debug library extension; +# always blank now. +# +#------------------------------------------------------------------------ + +AC_DEFUN([SC_ENABLE_SYMBOLS], [ + AC_MSG_CHECKING([for build with symbols]) + AC_ARG_ENABLE(symbols, + AC_HELP_STRING([--enable-symbols], + [build with debugging symbols (default: off)]), + [tcl_ok=$enableval], [tcl_ok=no]) +# FIXME: Currently, LDFLAGS_DEFAULT is not used, it should work like CFLAGS_DEFAULT. + DBGX="" + if test "$tcl_ok" = "no"; then + CFLAGS_DEFAULT='$(CFLAGS_OPTIMIZE)' + LDFLAGS_DEFAULT='$(LDFLAGS_OPTIMIZE)' + AC_MSG_RESULT([no]) + AC_DEFINE(TCL_CFG_OPTIMIZED, 1, [Is this an optimized build?]) + else + CFLAGS_DEFAULT='$(CFLAGS_DEBUG)' + LDFLAGS_DEFAULT='$(LDFLAGS_DEBUG)' + if test "$tcl_ok" = "yes"; then + AC_MSG_RESULT([yes (standard debugging)]) + fi + fi + AC_SUBST(CFLAGS_DEFAULT) + AC_SUBST(LDFLAGS_DEFAULT) + ### FIXME: Surely TCL_CFG_DEBUG should be set to whether we're debugging? 
+ AC_DEFINE(TCL_CFG_DEBUG, 1, [Is debugging enabled?]) + + if test "$tcl_ok" = "mem" -o "$tcl_ok" = "all"; then + AC_DEFINE(TCL_MEM_DEBUG, 1, [Is memory debugging enabled?]) + fi + + ifelse($1,bccdebug,dnl Only enable 'compile' for the Tcl core itself + if test "$tcl_ok" = "compile" -o "$tcl_ok" = "all"; then + AC_DEFINE(TCL_COMPILE_DEBUG, 1, [Is bytecode debugging enabled?]) + AC_DEFINE(TCL_COMPILE_STATS, 1, [Are bytecode statistics enabled?]) + fi) + + if test "$tcl_ok" != "yes" -a "$tcl_ok" != "no"; then + if test "$tcl_ok" = "all"; then + AC_MSG_RESULT([enabled symbols mem ]ifelse($1,bccdebug,[compile ])[debugging]) + else + AC_MSG_RESULT([enabled $tcl_ok debugging]) + fi + fi +]) + +#------------------------------------------------------------------------ +# SC_ENABLE_LANGINFO -- +# +# Allows use of modern nl_langinfo check for better l10n. +# This is only relevant for Unix. +# +# Arguments: +# none +# +# Results: +# +# Adds the following arguments to configure: +# --enable-langinfo=yes|no (default is yes) +# +# Defines the following vars: +# HAVE_LANGINFO Triggers use of nl_langinfo if defined. 
+# +#------------------------------------------------------------------------ + +AC_DEFUN([SC_ENABLE_LANGINFO], [ + AC_ARG_ENABLE(langinfo, + AC_HELP_STRING([--enable-langinfo], + [use nl_langinfo if possible to determine encoding at startup, otherwise use old heuristic (default: on)]), + [langinfo_ok=$enableval], [langinfo_ok=yes]) + + HAVE_LANGINFO=0 + if test "$langinfo_ok" = "yes"; then + AC_CHECK_HEADER(langinfo.h,[langinfo_ok=yes],[langinfo_ok=no]) + fi + AC_MSG_CHECKING([whether to use nl_langinfo]) + if test "$langinfo_ok" = "yes"; then + AC_CACHE_VAL(tcl_cv_langinfo_h, [ + AC_TRY_COMPILE([#include ], [nl_langinfo(CODESET);], + [tcl_cv_langinfo_h=yes],[tcl_cv_langinfo_h=no])]) + AC_MSG_RESULT([$tcl_cv_langinfo_h]) + if test $tcl_cv_langinfo_h = yes; then + AC_DEFINE(HAVE_LANGINFO, 1, [Do we have nl_langinfo()?]) + fi + else + AC_MSG_RESULT([$langinfo_ok]) + fi +]) + +#-------------------------------------------------------------------- +# SC_CONFIG_MANPAGES +# +# Decide whether to use symlinks for linking the manpages, +# whether to compress the manpages after installation, and +# whether to add a package name suffix to the installed +# manpages to avoidfile name clashes. +# If compression is enabled also find out what file name suffix +# the given compression program is using. +# +# Arguments: +# none +# +# Results: +# +# Adds the following arguments to configure: +# --enable-man-symlinks +# --enable-man-compression=PROG +# --enable-man-suffix[=STRING] +# +# Defines the following variable: +# +# MAN_FLAGS - The apropriate flags for installManPage +# according to the user's selection. 
+# +#-------------------------------------------------------------------- + +AC_DEFUN([SC_CONFIG_MANPAGES], [ + AC_MSG_CHECKING([whether to use symlinks for manpages]) + AC_ARG_ENABLE(man-symlinks, + AC_HELP_STRING([--enable-man-symlinks], + [use symlinks for the manpages (default: off)]), + test "$enableval" != "no" && MAN_FLAGS="$MAN_FLAGS --symlinks", + enableval="no") + AC_MSG_RESULT([$enableval]) + + AC_MSG_CHECKING([whether to compress the manpages]) + AC_ARG_ENABLE(man-compression, + AC_HELP_STRING([--enable-man-compression=PROG], + [compress the manpages with PROG (default: off)]), + [case $enableval in + yes) AC_MSG_ERROR([missing argument to --enable-man-compression]);; + no) ;; + *) MAN_FLAGS="$MAN_FLAGS --compress $enableval";; + esac], + enableval="no") + AC_MSG_RESULT([$enableval]) + if test "$enableval" != "no"; then + AC_MSG_CHECKING([for compressed file suffix]) + touch TeST + $enableval TeST + Z=`ls TeST* | sed 's/^....//'` + rm -f TeST* + MAN_FLAGS="$MAN_FLAGS --extension $Z" + AC_MSG_RESULT([$Z]) + fi + + AC_MSG_CHECKING([whether to add a package name suffix for the manpages]) + AC_ARG_ENABLE(man-suffix, + AC_HELP_STRING([--enable-man-suffix=STRING], + [use STRING as a suffix to manpage file names (default: no, AC_PACKAGE_NAME if enabled without specifying STRING)]), + [case $enableval in + yes) enableval="AC_PACKAGE_NAME" MAN_FLAGS="$MAN_FLAGS --suffix $enableval";; + no) ;; + *) MAN_FLAGS="$MAN_FLAGS --suffix $enableval";; + esac], + enableval="no") + AC_MSG_RESULT([$enableval]) + + AC_SUBST(MAN_FLAGS) +]) + +#-------------------------------------------------------------------- +# SC_CONFIG_SYSTEM +# +# Determine what the system is (some things cannot be easily checked +# on a feature-driven basis, alas). This can usually be done via the +# "uname" command, but there are a few systems, like Next, where +# this doesn't work. 
+# +# Arguments: +# none +# +# Results: +# Defines the following var: +# +# system - System/platform/version identification code. +# +#-------------------------------------------------------------------- + +AC_DEFUN([SC_CONFIG_SYSTEM], [ + AC_CACHE_CHECK([system version], tcl_cv_sys_version, [ + if test -f /usr/lib/NextStep/software_version; then + tcl_cv_sys_version=NEXTSTEP-`awk '/3/,/3/' /usr/lib/NextStep/software_version` + else + tcl_cv_sys_version=`uname -s`-`uname -r` + if test "$?" -ne 0 ; then + AC_MSG_WARN([can't find uname command]) + tcl_cv_sys_version=unknown + else + # Special check for weird MP-RAS system (uname returns weird + # results, and the version is kept in special file). + + if test -r /etc/.relid -a "X`uname -n`" = "X`uname -s`" ; then + tcl_cv_sys_version=MP-RAS-`awk '{print $[3]}' /etc/.relid` + fi + if test "`uname -s`" = "AIX" ; then + tcl_cv_sys_version=AIX-`uname -v`.`uname -r` + fi + fi + fi + ]) + system=$tcl_cv_sys_version +]) + +#-------------------------------------------------------------------- +# SC_CONFIG_CFLAGS +# +# Try to determine the proper flags to pass to the compiler +# for building shared libraries and other such nonsense. +# +# Arguments: +# none +# +# Results: +# +# Defines and substitutes the following vars: +# +# DL_OBJS - Name of the object file that implements dynamic +# loading for Tcl on this system. +# DL_LIBS - Library file(s) to include in tclsh and other base +# applications in order for the "load" command to work. +# LDFLAGS - Flags to pass to the compiler when linking object +# files into an executable application binary such +# as tclsh. +# LD_SEARCH_FLAGS-Flags to pass to ld, such as "-R /usr/local/tcl/lib", +# that tell the run-time dynamic linker where to look +# for shared libraries such as libtcl.so. Depends on +# the variable LIB_RUNTIME_DIR in the Makefile. Could +# be the same as CC_SEARCH_FLAGS if ${CC} is used to link. 
+# CC_SEARCH_FLAGS-Flags to pass to ${CC}, such as "-Wl,-rpath,/usr/local/tcl/lib", +# that tell the run-time dynamic linker where to look +# for shared libraries such as libtcl.so. Depends on +# the variable LIB_RUNTIME_DIR in the Makefile. +# MAKE_LIB - Command to execute to build the a library; +# differs when building shared or static. +# MAKE_STUB_LIB - +# Command to execute to build a stub library. +# INSTALL_LIB - Command to execute to install a library; +# differs when building shared or static. +# INSTALL_STUB_LIB - +# Command to execute to install a stub library. +# STLIB_LD - Base command to use for combining object files +# into a static library. +# SHLIB_CFLAGS - Flags to pass to cc when compiling the components +# of a shared library (may request position-independent +# code, among other things). +# SHLIB_LD - Base command to use for combining object files +# into a shared library. +# SHLIB_LD_LIBS - Dependent libraries for the linker to scan when +# creating shared libraries. This symbol typically +# goes at the end of the "ld" commands that build +# shared libraries. The value of the symbol is +# "${LIBS}" if all of the dependent libraries should +# be specified when creating a shared library. If +# dependent libraries should not be specified (as on +# SunOS 4.x, where they cause the link to fail, or in +# general if Tcl and Tk aren't themselves shared +# libraries), then this symbol has an empty string +# as its value. +# SHLIB_SUFFIX - Suffix to use for the names of dynamically loadable +# extensions. An empty string means we don't know how +# to use shared libraries on this platform. +# TCL_SHLIB_LD_EXTRAS - Additional element which are added to SHLIB_LD_LIBS +# TK_SHLIB_LD_EXTRAS for the build of Tcl and Tk, but not recorded in the +# tclConfig.sh, since they are only used for the build +# of Tcl and Tk. +# Examples: MacOS X records the library version and +# compatibility version in the shared library. 
But +# of course the Tcl version of this is only used for Tcl. +# LIB_SUFFIX - Specifies everything that comes after the "libfoo" +# in a static or shared library name, using the $VERSION variable +# to put the version in the right place. This is used +# by platforms that need non-standard library names. +# Examples: ${VERSION}.so.1.1 on NetBSD, since it needs +# to have a version after the .so, and ${VERSION}.a +# on AIX, since a shared library needs to have +# a .a extension whereas shared objects for loadable +# extensions have a .so extension. Defaults to +# ${VERSION}${SHLIB_SUFFIX}. +# TCL_NEEDS_EXP_FILE - +# 1 means that an export file is needed to link to a +# shared library. +# TCL_EXP_FILE - The name of the installed export / import file which +# should be used to link to the Tcl shared library. +# Empty if Tcl is unshared. +# TCL_BUILD_EXP_FILE - +# The name of the built export / import file which +# should be used to link to the Tcl shared library. +# Empty if Tcl is unshared. +# TCL_LIBS - +# Libs to use when linking Tcl shell or some other +# shell that includes Tcl libs. +# CFLAGS_DEBUG - +# Flags used when running the compiler in debug mode +# CFLAGS_OPTIMIZE - +# Flags used when running the compiler in optimize mode +# CFLAGS - Additional CFLAGS added as necessary (usually 64-bit) +# +#-------------------------------------------------------------------- + +AC_DEFUN([SC_CONFIG_CFLAGS], [ + + # Step 0.a: Enable 64 bit support? + + AC_MSG_CHECKING([if 64bit support is requested]) + AC_ARG_ENABLE(64bit, + AC_HELP_STRING([--enable-64bit], + [enable 64bit support (default: off)]), + [do64bit=$enableval], [do64bit=no]) + AC_MSG_RESULT([$do64bit]) + + # Step 0.b: Enable Solaris 64 bit VIS support? 
+ + AC_MSG_CHECKING([if 64bit Sparc VIS support is requested]) + AC_ARG_ENABLE(64bit-vis, + AC_HELP_STRING([--enable-64bit-vis], + [enable 64bit Sparc VIS support (default: off)]), + [do64bitVIS=$enableval], [do64bitVIS=no]) + AC_MSG_RESULT([$do64bitVIS]) + # Force 64bit on with VIS + AS_IF([test "$do64bitVIS" = "yes"], [do64bit=yes]) + + # Step 0.c: Check if visibility support is available. Do this here so + # that platform specific alternatives can be used below if this fails. + + AC_CACHE_CHECK([if compiler supports visibility "hidden"], + tcl_cv_cc_visibility_hidden, [ + hold_cflags=$CFLAGS; CFLAGS="$CFLAGS -Werror" + AC_TRY_LINK([ + extern __attribute__((__visibility__("hidden"))) void f(void); + void f(void) {}], [f();], tcl_cv_cc_visibility_hidden=yes, + tcl_cv_cc_visibility_hidden=no) + CFLAGS=$hold_cflags]) + AS_IF([test $tcl_cv_cc_visibility_hidden = yes], [ + AC_DEFINE(MODULE_SCOPE, + [extern __attribute__((__visibility__("hidden")))], + [Compiler support for module scope symbols]) + ]) + + # Step 0.d: Disable -rpath support? + + AC_MSG_CHECKING([if rpath support is requested]) + AC_ARG_ENABLE(rpath, + AC_HELP_STRING([--disable-rpath], + [disable rpath support (default: on)]), + [doRpath=$enableval], [doRpath=yes]) + AC_MSG_RESULT([$doRpath]) + + # Step 1: set the variable "system" to hold the name and version number + # for the system. + + SC_CONFIG_SYSTEM + + # Step 2: check for existence of -ldl library. This is needed because + # Linux can use either -ldl or -ldld for dynamic loading. + + AC_CHECK_LIB(dl, dlopen, have_dl=yes, have_dl=no) + + # Require ranlib early so we can override it in special cases below. + LDAIX_SRC="" + AS_IF([test x"${SHLIB_VERSION}" = x], [SHLIB_VERSION="1.0"]) + + AC_REQUIRE([AC_PROG_RANLIB]) + + # Step 3: set configuration options based on system name and version. 
+ + do64bit_ok=no + LDFLAGS_ORIG="$LDFLAGS" + # When ld needs options to work in 64-bit mode, put them in + # LDFLAGS_ARCH so they eventually end up in LDFLAGS even if [load] + # is disabled by the user. [Bug 1016796] + LDFLAGS_ARCH="" + TCL_EXPORT_FILE_SUFFIX="" + UNSHARED_LIB_SUFFIX="" + TCL_TRIM_DOTS='`echo ${VERSION} | tr -d .`' + ECHO_VERSION='`echo ${VERSION}`' + TCL_LIB_VERSIONS_OK=ok + CFLAGS_DEBUG=-g + CFLAGS_OPTIMIZE=-O + AS_IF([test "$GCC" = yes], [ + CFLAGS_WARNING="-Wall" + ], [CFLAGS_WARNING=""]) + TCL_NEEDS_EXP_FILE=0 + TCL_BUILD_EXP_FILE="" + TCL_EXP_FILE="" +dnl FIXME: Replace AC_CHECK_PROG with AC_CHECK_TOOL once cross compiling is fixed. +dnl AC_CHECK_TOOL(AR, ar) + AC_CHECK_PROG(AR, ar, ar) + AS_IF([test "${AR}" = ""], [ + AC_MSG_ERROR([Required archive tool 'ar' not found on PATH.]) + ]) + STLIB_LD='${AR} cr' + LD_LIBRARY_PATH_VAR="LD_LIBRARY_PATH" + PLAT_OBJS="" + PLAT_SRCS="" + case $system in + AIX-*) + AS_IF([test "${TCL_THREADS}" = "1" -a "$GCC" != "yes"], [ + # AIX requires the _r compiler when gcc isn't being used + case "${CC}" in + *_r) + # ok ... + ;; + *) + CC=${CC}_r + ;; + esac + AC_MSG_RESULT([Using $CC for compiling with threads]) + ]) + LIBS="$LIBS -lc" + SHLIB_CFLAGS="" + # Note: need the LIBS below, otherwise Tk won't find Tcl's + # symbols when dynamically loaded into tclsh. 
+ SHLIB_LD_LIBS='${LIBS}' + SHLIB_SUFFIX=".so" + + DL_OBJS="tclLoadDl.o" + LD_LIBRARY_PATH_VAR="LIBPATH" + + # Check to enable 64-bit flags for compiler/linker on AIX 4+ + LDAIX_SRC='$(UNIX_DIR)/ldAix' + AS_IF([test "$do64bit" = yes -a "`uname -v`" -gt 3], [ + AS_IF([test "$GCC" = yes], [ + AC_MSG_WARN([64bit mode not supported with GCC on $system]) + ], [ + do64bit_ok=yes + CFLAGS="$CFLAGS -q64" + LDFLAGS_ARCH="-q64" + RANLIB="${RANLIB} -X64" + AR="${AR} -X64" + SHLIB_LD_FLAGS="-b64" + ]) + ]) + + AS_IF([test "`uname -m`" = ia64], [ + # AIX-5 uses ELF style dynamic libraries on IA-64, but not PPC + SHLIB_LD="/usr/ccs/bin/ld -G -z text" + # AIX-5 has dl* in libc.so + DL_LIBS="" + AS_IF([test "$GCC" = yes], [ + CC_SEARCH_FLAGS='-Wl,-R,${LIB_RUNTIME_DIR}' + ], [ + CC_SEARCH_FLAGS='-R${LIB_RUNTIME_DIR}' + ]) + LD_SEARCH_FLAGS='-R ${LIB_RUNTIME_DIR}' + ], [ + AS_IF([test "$GCC" = yes], [SHLIB_LD='${CC} -shared'], [ + SHLIB_LD="/bin/ld -bhalt:4 -bM:SRE -bE:lib.exp -H512 -T512 -bnoentry" + ]) + SHLIB_LD="${TCL_SRC_DIR}/unix/ldAix ${SHLIB_LD} ${SHLIB_LD_FLAGS}" + DL_LIBS="-ldl" + CC_SEARCH_FLAGS='-L${LIB_RUNTIME_DIR}' + LD_SEARCH_FLAGS=${CC_SEARCH_FLAGS} + TCL_NEEDS_EXP_FILE=1 + TCL_EXPORT_FILE_SUFFIX='${VERSION}.exp' + ]) + + # AIX v<=4.1 has some different flags than 4.2+ + AS_IF([test "$system" = "AIX-4.1" -o "`uname -v`" -lt 4], [ + AC_LIBOBJ([tclLoadAix]) + DL_LIBS="-lld" + ]) + + # On AIX <=v4 systems, libbsd.a has to be linked in to support + # non-blocking file IO. This library has to be linked in after + # the MATH_LIBS or it breaks the pow() function. The way to + # insure proper sequencing, is to add it to the tail of MATH_LIBS. + # This library also supplies gettimeofday. + # + # AIX does not have a timezone field in struct tm. When the AIX + # bsd library is used, the timezone global and the gettimeofday + # methods are to be avoided for timezone deduction instead, we + # deduce the timezone by comparing the localtime result on a + # known GMT value. 
+ + AC_CHECK_LIB(bsd, gettimeofday, libbsd=yes, libbsd=no) + AS_IF([test $libbsd = yes], [ + MATH_LIBS="$MATH_LIBS -lbsd" + AC_DEFINE(USE_DELTA_FOR_TZ, 1, [Do we need a special AIX hack for timezones?]) + ]) + ;; + BeOS*) + SHLIB_CFLAGS="-fPIC" + SHLIB_LD='${CC} -nostart' + SHLIB_LD_LIBS='${LIBS}' + SHLIB_SUFFIX=".so" + DL_OBJS="tclLoadDl.o" + DL_LIBS="-ldl" + + #----------------------------------------------------------- + # Check for inet_ntoa in -lbind, for BeOS (which also needs + # -lsocket, even if the network functions are in -lnet which + # is always linked to, for compatibility. + #----------------------------------------------------------- + AC_CHECK_LIB(bind, inet_ntoa, [LIBS="$LIBS -lbind -lsocket"]) + ;; + BSD/OS-2.1*|BSD/OS-3*) + SHLIB_CFLAGS="" + SHLIB_LD="shlicc -r" + SHLIB_LD_LIBS='${LIBS}' + SHLIB_SUFFIX=".so" + DL_OBJS="tclLoadDl.o" + DL_LIBS="-ldl" + CC_SEARCH_FLAGS="" + LD_SEARCH_FLAGS="" + ;; + BSD/OS-4.*) + SHLIB_CFLAGS="-export-dynamic -fPIC" + SHLIB_LD='${CC} -shared' + SHLIB_LD_LIBS='${LIBS}' + SHLIB_SUFFIX=".so" + DL_OBJS="tclLoadDl.o" + DL_LIBS="-ldl" + LDFLAGS="$LDFLAGS -export-dynamic" + CC_SEARCH_FLAGS="" + LD_SEARCH_FLAGS="" + ;; + dgux*) + SHLIB_CFLAGS="-K PIC" + SHLIB_LD='${CC} -G' + SHLIB_LD_LIBS="" + SHLIB_SUFFIX=".so" + DL_OBJS="tclLoadDl.o" + DL_LIBS="-ldl" + CC_SEARCH_FLAGS="" + LD_SEARCH_FLAGS="" + ;; + HP-UX-*.11.*) + # Use updated header definitions where possible + AC_DEFINE(_XOPEN_SOURCE_EXTENDED, 1, [Do we want to use the XOPEN network library?]) + AC_DEFINE(_XOPEN_SOURCE, 1, [Do we want to use the XOPEN network library?]) + LIBS="$LIBS -lxnet" # Use the XOPEN network library + + AS_IF([test "`uname -m`" = ia64], [ + SHLIB_SUFFIX=".so" + ], [ + SHLIB_SUFFIX=".sl" + ]) + AC_CHECK_LIB(dld, shl_load, tcl_ok=yes, tcl_ok=no) + AS_IF([test "$tcl_ok" = yes], [ + SHLIB_CFLAGS="+z" + SHLIB_LD="ld -b" + SHLIB_LD_LIBS='${LIBS}' + DL_OBJS="tclLoadShl.o" + DL_LIBS="-ldld" + LDFLAGS="$LDFLAGS -Wl,-E" + 
CC_SEARCH_FLAGS='-Wl,+s,+b,${LIB_RUNTIME_DIR}:.' + LD_SEARCH_FLAGS='+s +b ${LIB_RUNTIME_DIR}:.' + LD_LIBRARY_PATH_VAR="SHLIB_PATH" + ]) + AS_IF([test "$GCC" = yes], [ + SHLIB_LD='${CC} -shared' + SHLIB_LD_LIBS='${LIBS}' + LD_SEARCH_FLAGS=${CC_SEARCH_FLAGS} + ]) + + # Users may want PA-RISC 1.1/2.0 portable code - needs HP cc + #CFLAGS="$CFLAGS +DAportable" + + # Check to enable 64-bit flags for compiler/linker + AS_IF([test "$do64bit" = "yes"], [ + AS_IF([test "$GCC" = yes], [ + case `${CC} -dumpmachine` in + hppa64*) + # 64-bit gcc in use. Fix flags for GNU ld. + do64bit_ok=yes + SHLIB_LD='${CC} -shared' + SHLIB_LD_LIBS='${LIBS}' + AS_IF([test $doRpath = yes], [ + CC_SEARCH_FLAGS='-Wl,-rpath,${LIB_RUNTIME_DIR}']) + LD_SEARCH_FLAGS=${CC_SEARCH_FLAGS} + ;; + *) + AC_MSG_WARN([64bit mode not supported with GCC on $system]) + ;; + esac + ], [ + do64bit_ok=yes + CFLAGS="$CFLAGS +DD64" + LDFLAGS_ARCH="+DD64" + ]) + ]) ;; + HP-UX-*.08.*|HP-UX-*.09.*|HP-UX-*.10.*) + SHLIB_SUFFIX=".sl" + AC_CHECK_LIB(dld, shl_load, tcl_ok=yes, tcl_ok=no) + AS_IF([test "$tcl_ok" = yes], [ + SHLIB_CFLAGS="+z" + SHLIB_LD="ld -b" + SHLIB_LD_LIBS="" + DL_OBJS="tclLoadShl.o" + DL_LIBS="-ldld" + LDFLAGS="$LDFLAGS -Wl,-E" + CC_SEARCH_FLAGS='-Wl,+s,+b,${LIB_RUNTIME_DIR}:.' + LD_SEARCH_FLAGS='+s +b ${LIB_RUNTIME_DIR}:.' 
+ LD_LIBRARY_PATH_VAR="SHLIB_PATH" + ]) ;; + IRIX-5.*) + SHLIB_CFLAGS="" + SHLIB_LD="ld -shared -rdata_shared" + SHLIB_LD_LIBS='${LIBS}' + SHLIB_SUFFIX=".so" + DL_OBJS="tclLoadDl.o" + DL_LIBS="" + AS_IF([test $doRpath = yes], [ + CC_SEARCH_FLAGS='-Wl,-rpath,${LIB_RUNTIME_DIR}' + LD_SEARCH_FLAGS='-rpath ${LIB_RUNTIME_DIR}']) + ;; + IRIX-6.*) + SHLIB_CFLAGS="" + SHLIB_LD="ld -n32 -shared -rdata_shared" + SHLIB_LD_LIBS='${LIBS}' + SHLIB_SUFFIX=".so" + DL_OBJS="tclLoadDl.o" + DL_LIBS="" + AS_IF([test $doRpath = yes], [ + CC_SEARCH_FLAGS='-Wl,-rpath,${LIB_RUNTIME_DIR}' + LD_SEARCH_FLAGS='-rpath ${LIB_RUNTIME_DIR}']) + AS_IF([test "$GCC" = yes], [ + CFLAGS="$CFLAGS -mabi=n32" + LDFLAGS="$LDFLAGS -mabi=n32" + ], [ + case $system in + IRIX-6.3) + # Use to build 6.2 compatible binaries on 6.3. + CFLAGS="$CFLAGS -n32 -D_OLD_TERMIOS" + ;; + *) + CFLAGS="$CFLAGS -n32" + ;; + esac + LDFLAGS="$LDFLAGS -n32" + ]) + ;; + IRIX64-6.*) + SHLIB_CFLAGS="" + SHLIB_LD="ld -n32 -shared -rdata_shared" + SHLIB_LD_LIBS='${LIBS}' + SHLIB_SUFFIX=".so" + DL_OBJS="tclLoadDl.o" + DL_LIBS="" + AS_IF([test $doRpath = yes], [ + CC_SEARCH_FLAGS='-Wl,-rpath,${LIB_RUNTIME_DIR}' + LD_SEARCH_FLAGS='-rpath ${LIB_RUNTIME_DIR}']) + + # Check to enable 64-bit flags for compiler/linker + + AS_IF([test "$do64bit" = yes], [ + AS_IF([test "$GCC" = yes], [ + AC_MSG_WARN([64bit mode not supported by gcc]) + ], [ + do64bit_ok=yes + SHLIB_LD="ld -64 -shared -rdata_shared" + CFLAGS="$CFLAGS -64" + LDFLAGS_ARCH="-64" + ]) + ]) + ;; + Linux*) + SHLIB_CFLAGS="-fPIC" + SHLIB_LD_LIBS='${LIBS}' + SHLIB_SUFFIX=".so" + + CFLAGS_OPTIMIZE="-O2" + # egcs-2.91.66 on Redhat Linux 6.0 generates lots of warnings + # when you inline the string and math operations. Turn this off to + # get rid of the warnings. 
+ #CFLAGS_OPTIMIZE="${CFLAGS_OPTIMIZE} -D__NO_STRING_INLINES -D__NO_MATH_INLINES" + + SHLIB_LD='${CC} -shared ${CFLAGS} ${LDFLAGS}' + DL_OBJS="tclLoadDl.o" + DL_LIBS="-ldl" + LDFLAGS="$LDFLAGS -Wl,--export-dynamic" + AS_IF([test $doRpath = yes], [ + CC_SEARCH_FLAGS='-Wl,-rpath,${LIB_RUNTIME_DIR}']) + LD_SEARCH_FLAGS=${CC_SEARCH_FLAGS} + AS_IF([test "`uname -m`" = "alpha"], [CFLAGS="$CFLAGS -mieee"]) + AS_IF([test $do64bit = yes], [ + AC_CACHE_CHECK([if compiler accepts -m64 flag], tcl_cv_cc_m64, [ + hold_cflags=$CFLAGS + CFLAGS="$CFLAGS -m64" + AC_TRY_LINK(,, tcl_cv_cc_m64=yes, tcl_cv_cc_m64=no) + CFLAGS=$hold_cflags]) + AS_IF([test $tcl_cv_cc_m64 = yes], [ + CFLAGS="$CFLAGS -m64" + do64bit_ok=yes + ]) + ]) + + # The combo of gcc + glibc has a bug related to inlining of + # functions like strtod(). The -fno-builtin flag should address + # this problem but it does not work. The -fno-inline flag is kind + # of overkill but it works. Disable inlining only when one of the + # files in compat/*.c is being linked in. 
+ + AS_IF([test x"${USE_COMPAT}" != x],[CFLAGS="$CFLAGS -fno-inline"]) + ;; + GNU*) + SHLIB_CFLAGS="-fPIC" + SHLIB_LD_LIBS='${LIBS}' + SHLIB_SUFFIX=".so" + + SHLIB_LD='${CC} -shared' + DL_OBJS="" + DL_LIBS="-ldl" + LDFLAGS="$LDFLAGS -Wl,--export-dynamic" + CC_SEARCH_FLAGS="" + LD_SEARCH_FLAGS="" + AS_IF([test "`uname -m`" = "alpha"], [CFLAGS="$CFLAGS -mieee"]) + ;; + Lynx*) + SHLIB_CFLAGS="-fPIC" + SHLIB_LD_LIBS='${LIBS}' + SHLIB_SUFFIX=".so" + CFLAGS_OPTIMIZE=-02 + SHLIB_LD='${CC} -shared' + DL_OBJS="tclLoadDl.o" + DL_LIBS="-mshared -ldl" + LD_FLAGS="-Wl,--export-dynamic" + AS_IF([test $doRpath = yes], [ + CC_SEARCH_FLAGS='-Wl,-rpath,${LIB_RUNTIME_DIR}' + LD_SEARCH_FLAGS='-Wl,-rpath,${LIB_RUNTIME_DIR}']) + ;; + MP-RAS-02*) + SHLIB_CFLAGS="-K PIC" + SHLIB_LD='${CC} -G' + SHLIB_LD_LIBS="" + SHLIB_SUFFIX=".so" + DL_OBJS="tclLoadDl.o" + DL_LIBS="-ldl" + CC_SEARCH_FLAGS="" + LD_SEARCH_FLAGS="" + ;; + MP-RAS-*) + SHLIB_CFLAGS="-K PIC" + SHLIB_LD='${CC} -G' + SHLIB_LD_LIBS="" + SHLIB_SUFFIX=".so" + DL_OBJS="tclLoadDl.o" + DL_LIBS="-ldl" + LDFLAGS="$LDFLAGS -Wl,-Bexport" + CC_SEARCH_FLAGS="" + LD_SEARCH_FLAGS="" + ;; + NetBSD-1.*|FreeBSD-[[1-2]].*) + SHLIB_CFLAGS="-fPIC" + SHLIB_LD="ld -Bshareable -x" + SHLIB_LD_LIBS='${LIBS}' + SHLIB_SUFFIX=".so" + DL_OBJS="tclLoadDl.o" + DL_LIBS="" + AS_IF([test $doRpath = yes], [ + CC_SEARCH_FLAGS='-Wl,-rpath,${LIB_RUNTIME_DIR}' + LD_SEARCH_FLAGS='-rpath ${LIB_RUNTIME_DIR}']) + AC_CACHE_CHECK([for ELF], tcl_cv_ld_elf, [ + AC_EGREP_CPP(yes, [ +#ifdef __ELF__ + yes +#endif + ], tcl_cv_ld_elf=yes, tcl_cv_ld_elf=no)]) + AS_IF([test $tcl_cv_ld_elf = yes], [ + SHARED_LIB_SUFFIX='${TCL_TRIM_DOTS}.so' + ], [ + SHARED_LIB_SUFFIX='${TCL_TRIM_DOTS}.so.${SHLIB_VERSION}' + ]) + + # Ancient FreeBSD doesn't handle version numbers with dots. 
+ + UNSHARED_LIB_SUFFIX='${TCL_TRIM_DOTS}.a' + TCL_LIB_VERSIONS_OK=nodots + ;; + OpenBSD-*) + CFLAGS_OPTIMIZE='-O2' + SHLIB_CFLAGS="-fPIC" + SHLIB_LD='${CC} -shared ${SHLIB_CFLAGS}' + SHLIB_LD_LIBS='${LIBS}' + SHLIB_SUFFIX=".so" + DL_OBJS="tclLoadDl.o" + DL_LIBS="" + AS_IF([test $doRpath = yes], [ + CC_SEARCH_FLAGS='-Wl,-rpath,${LIB_RUNTIME_DIR}']) + LD_SEARCH_FLAGS=${CC_SEARCH_FLAGS} + SHARED_LIB_SUFFIX='${TCL_TRIM_DOTS}.so.${SHLIB_VERSION}' + AC_CACHE_CHECK([for ELF], tcl_cv_ld_elf, [ + AC_EGREP_CPP(yes, [ +#ifdef __ELF__ + yes +#endif + ], tcl_cv_ld_elf=yes, tcl_cv_ld_elf=no)]) + AS_IF([test $tcl_cv_ld_elf = yes], [ + LDFLAGS=-Wl,-export-dynamic + ], [LDFLAGS=""]) + + # OpenBSD doesn't do version numbers with dots. + UNSHARED_LIB_SUFFIX='${TCL_TRIM_DOTS}.a' + TCL_LIB_VERSIONS_OK=nodots + ;; + NetBSD-*|FreeBSD-*) + # FreeBSD 3.* and greater have ELF. + # NetBSD 2.* has ELF and can use 'cc -shared' to build shared libs + SHLIB_CFLAGS="-fPIC" + SHLIB_LD='${CC} -shared ${SHLIB_CFLAGS}' + SHLIB_LD_LIBS='${LIBS}' + SHLIB_SUFFIX=".so" + DL_OBJS="tclLoadDl.o" + DL_LIBS="" + LDFLAGS="$LDFLAGS -export-dynamic" + AS_IF([test $doRpath = yes], [ + CC_SEARCH_FLAGS='-Wl,-rpath,${LIB_RUNTIME_DIR}']) + LD_SEARCH_FLAGS=${CC_SEARCH_FLAGS} + AS_IF([test "${TCL_THREADS}" = "1"], [ + # The -pthread needs to go in the CFLAGS, not LIBS + LIBS=`echo $LIBS | sed s/-pthread//` + CFLAGS="$CFLAGS -pthread" + LDFLAGS="$LDFLAGS -pthread" + ]) + case $system in + FreeBSD-3.*) + # FreeBSD-3 doesn't handle version numbers with dots. 
+ UNSHARED_LIB_SUFFIX='${TCL_TRIM_DOTS}.a' + SHARED_LIB_SUFFIX='${TCL_TRIM_DOTS}.so' + TCL_LIB_VERSIONS_OK=nodots + ;; + esac + ;; + Darwin-*) + CFLAGS_OPTIMIZE="-Os" + SHLIB_CFLAGS="-fno-common" + # To avoid discrepancies between what headers configure sees during + # preprocessing tests and compiling tests, move any -isysroot and + # -mmacosx-version-min flags from CFLAGS to CPPFLAGS: + CPPFLAGS="${CPPFLAGS} `echo " ${CFLAGS}" | \ + awk 'BEGIN {FS=" +-";ORS=" "}; {for (i=2;i<=NF;i++) \ + if ([$]i~/^(isysroot|mmacosx-version-min)/) print "-"[$]i}'`" + CFLAGS="`echo " ${CFLAGS}" | \ + awk 'BEGIN {FS=" +-";ORS=" "}; {for (i=2;i<=NF;i++) \ + if (!([$]i~/^(isysroot|mmacosx-version-min)/)) print "-"[$]i}'`" + AS_IF([test $do64bit = yes], [ + case `arch` in + ppc) + AC_CACHE_CHECK([if compiler accepts -arch ppc64 flag], + tcl_cv_cc_arch_ppc64, [ + hold_cflags=$CFLAGS + CFLAGS="$CFLAGS -arch ppc64 -mpowerpc64 -mcpu=G5" + AC_TRY_LINK(,, tcl_cv_cc_arch_ppc64=yes, + tcl_cv_cc_arch_ppc64=no) + CFLAGS=$hold_cflags]) + AS_IF([test $tcl_cv_cc_arch_ppc64 = yes], [ + CFLAGS="$CFLAGS -arch ppc64 -mpowerpc64 -mcpu=G5" + do64bit_ok=yes + ]);; + i386) + AC_CACHE_CHECK([if compiler accepts -arch x86_64 flag], + tcl_cv_cc_arch_x86_64, [ + hold_cflags=$CFLAGS + CFLAGS="$CFLAGS -arch x86_64" + AC_TRY_LINK(,, tcl_cv_cc_arch_x86_64=yes, + tcl_cv_cc_arch_x86_64=no) + CFLAGS=$hold_cflags]) + AS_IF([test $tcl_cv_cc_arch_x86_64 = yes], [ + CFLAGS="$CFLAGS -arch x86_64" + do64bit_ok=yes + ]);; + *) + AC_MSG_WARN([Don't know how enable 64-bit on architecture `arch`]);; + esac + ], [ + # Check for combined 32-bit and 64-bit fat build + AS_IF([echo "$CFLAGS " |grep -E -q -- '-arch (ppc64|x86_64) ' \ + && echo "$CFLAGS " |grep -E -q -- '-arch (ppc|i386) '], [ + fat_32_64=yes]) + ]) + SHLIB_LD='${CC} -dynamiclib ${CFLAGS} ${LDFLAGS}' + AC_CACHE_CHECK([if ld accepts -single_module flag], tcl_cv_ld_single_module, [ + hold_ldflags=$LDFLAGS + LDFLAGS="$LDFLAGS -dynamiclib -Wl,-single_module" + 
AC_TRY_LINK(, [int i;], tcl_cv_ld_single_module=yes, tcl_cv_ld_single_module=no) + LDFLAGS=$hold_ldflags]) + AS_IF([test $tcl_cv_ld_single_module = yes], [ + SHLIB_LD="${SHLIB_LD} -Wl,-single_module" + ]) + SHLIB_LD_LIBS='${LIBS}' + SHLIB_SUFFIX=".dylib" + DL_OBJS="tclLoadDyld.o" + DL_LIBS="" + # Don't use -prebind when building for Mac OS X 10.4 or later only: + AS_IF([test "`echo "${MACOSX_DEPLOYMENT_TARGET}" | awk -F '10\\.' '{print int([$]2)}'`" -lt 4 -a \ + "`echo "${CPPFLAGS}" | awk -F '-mmacosx-version-min=10\\.' '{print int([$]2)}'`" -lt 4], [ + LDFLAGS="$LDFLAGS -prebind"]) + LDFLAGS="$LDFLAGS -headerpad_max_install_names" + AC_CACHE_CHECK([if ld accepts -search_paths_first flag], + tcl_cv_ld_search_paths_first, [ + hold_ldflags=$LDFLAGS + LDFLAGS="$LDFLAGS -Wl,-search_paths_first" + AC_TRY_LINK(, [int i;], tcl_cv_ld_search_paths_first=yes, + tcl_cv_ld_search_paths_first=no) + LDFLAGS=$hold_ldflags]) + AS_IF([test $tcl_cv_ld_search_paths_first = yes], [ + LDFLAGS="$LDFLAGS -Wl,-search_paths_first" + ]) + AS_IF([test "$tcl_cv_cc_visibility_hidden" != yes], [ + AC_DEFINE(MODULE_SCOPE, [__private_extern__], + [Compiler support for module scope symbols]) + ]) + CC_SEARCH_FLAGS="" + LD_SEARCH_FLAGS="" + LD_LIBRARY_PATH_VAR="DYLD_LIBRARY_PATH" + AC_DEFINE(MAC_OSX_TCL, 1, [Is this a Mac I see before me?]) + PLAT_OBJS='${MAC_OSX_OBJS}' + PLAT_SRCS='${MAC_OSX_SRCS}' + AC_MSG_CHECKING([whether to use CoreFoundation]) + AC_ARG_ENABLE(corefoundation, + AC_HELP_STRING([--enable-corefoundation], + [use CoreFoundation API on MacOSX (default: on)]), + [tcl_corefoundation=$enableval], [tcl_corefoundation=yes]) + AC_MSG_RESULT([$tcl_corefoundation]) + AS_IF([test $tcl_corefoundation = yes], [ + AC_CACHE_CHECK([for CoreFoundation.framework], + tcl_cv_lib_corefoundation, [ + hold_libs=$LIBS + AS_IF([test "$fat_32_64" = yes], [ + for v in CFLAGS CPPFLAGS LDFLAGS; do + # On Tiger there is no 64-bit CF, so remove 64-bit + # archs from CFLAGS et al. 
while testing for + # presence of CF. 64-bit CF is disabled in + # tclUnixPort.h if necessary. + eval 'hold_'$v'="$'$v'";'$v'="`echo "$'$v' "|sed -e "s/-arch ppc64 / /g" -e "s/-arch x86_64 / /g"`"' + done]) + LIBS="$LIBS -framework CoreFoundation" + AC_TRY_LINK([#include ], + [CFBundleRef b = CFBundleGetMainBundle();], + tcl_cv_lib_corefoundation=yes, + tcl_cv_lib_corefoundation=no) + AS_IF([test "$fat_32_64" = yes], [ + for v in CFLAGS CPPFLAGS LDFLAGS; do + eval $v'="$hold_'$v'"' + done]) + LIBS=$hold_libs]) + AS_IF([test $tcl_cv_lib_corefoundation = yes], [ + LIBS="$LIBS -framework CoreFoundation" + AC_DEFINE(HAVE_COREFOUNDATION, 1, + [Do we have access to Darwin CoreFoundation.framework?]) + ], [tcl_corefoundation=no]) + AS_IF([test "$fat_32_64" = yes -a $tcl_corefoundation = yes],[ + AC_CACHE_CHECK([for 64-bit CoreFoundation], + tcl_cv_lib_corefoundation_64, [ + for v in CFLAGS CPPFLAGS LDFLAGS; do + eval 'hold_'$v'="$'$v'";'$v'="`echo "$'$v' "|sed -e "s/-arch ppc / /g" -e "s/-arch i386 / /g"`"' + done + AC_TRY_LINK([#include ], + [CFBundleRef b = CFBundleGetMainBundle();], + tcl_cv_lib_corefoundation_64=yes, + tcl_cv_lib_corefoundation_64=no) + for v in CFLAGS CPPFLAGS LDFLAGS; do + eval $v'="$hold_'$v'"' + done]) + AS_IF([test $tcl_cv_lib_corefoundation_64 = no], [ + AC_DEFINE(NO_COREFOUNDATION_64, 1, + [Is Darwin CoreFoundation unavailable for 64-bit?]) + LDFLAGS="$LDFLAGS -Wl,-no_arch_warnings" + ]) + ]) + ]) + ;; + NEXTSTEP-*) + SHLIB_CFLAGS="" + SHLIB_LD='${CC} -nostdlib -r' + SHLIB_LD_LIBS="" + SHLIB_SUFFIX=".so" + DL_OBJS="tclLoadNext.o" + DL_LIBS="" + CC_SEARCH_FLAGS="" + LD_SEARCH_FLAGS="" + ;; + OS/390-*) + CFLAGS_OPTIMIZE="" # Optimizer is buggy + AC_DEFINE(_OE_SOCKETS, 1, # needed in sys/socket.h + [Should OS/390 do the right thing with sockets?]) + ;; + OSF1-1.0|OSF1-1.1|OSF1-1.2) + # OSF/1 1.[012] from OSF, and derivatives, including Paragon OSF/1 + SHLIB_CFLAGS="" + # Hack: make package name same as library name + SHLIB_LD='ld -R -export $@:' + 
SHLIB_LD_LIBS="" + SHLIB_SUFFIX=".so" + DL_OBJS="tclLoadOSF.o" + DL_LIBS="" + CC_SEARCH_FLAGS="" + LD_SEARCH_FLAGS="" + ;; + OSF1-1.*) + # OSF/1 1.3 from OSF using ELF, and derivatives, including AD2 + SHLIB_CFLAGS="-fPIC" + AS_IF([test "$SHARED_BUILD" = 1], [SHLIB_LD="ld -shared"], [ + SHLIB_LD="ld -non_shared" + ]) + SHLIB_LD_LIBS="" + SHLIB_SUFFIX=".so" + DL_OBJS="tclLoadDl.o" + DL_LIBS="" + CC_SEARCH_FLAGS="" + LD_SEARCH_FLAGS="" + ;; + OSF1-V*) + # Digital OSF/1 + SHLIB_CFLAGS="" + AS_IF([test "$SHARED_BUILD" = 1], [ + SHLIB_LD='ld -shared -expect_unresolved "*"' + ], [ + SHLIB_LD='ld -non_shared -expect_unresolved "*"' + ]) + SHLIB_LD_LIBS="" + SHLIB_SUFFIX=".so" + DL_OBJS="tclLoadDl.o" + DL_LIBS="" + AS_IF([test $doRpath = yes], [ + CC_SEARCH_FLAGS='-Wl,-rpath,${LIB_RUNTIME_DIR}' + LD_SEARCH_FLAGS='-rpath ${LIB_RUNTIME_DIR}']) + AS_IF([test "$GCC" = yes], [CFLAGS="$CFLAGS -mieee"], [ + CFLAGS="$CFLAGS -DHAVE_TZSET -std1 -ieee"]) + # see pthread_intro(3) for pthread support on osf1, k.furukawa + AS_IF([test "${TCL_THREADS}" = 1], [ + CFLAGS="$CFLAGS -DHAVE_PTHREAD_ATTR_SETSTACKSIZE" + CFLAGS="$CFLAGS -DTCL_THREAD_STACK_MIN=PTHREAD_STACK_MIN*64" + LIBS=`echo $LIBS | sed s/-lpthreads//` + AS_IF([test "$GCC" = yes], [ + LIBS="$LIBS -lpthread -lmach -lexc" + ], [ + CFLAGS="$CFLAGS -pthread" + LDFLAGS="$LDFLAGS -pthread" + ]) + ]) + ;; + QNX-6*) + # QNX RTP + # This may work for all QNX, but it was only reported for v6. + SHLIB_CFLAGS="-fPIC" + SHLIB_LD="ld -Bshareable -x" + SHLIB_LD_LIBS="" + SHLIB_SUFFIX=".so" + DL_OBJS="tclLoadDl.o" + # dlopen is in -lc on QNX + DL_LIBS="" + CC_SEARCH_FLAGS="" + LD_SEARCH_FLAGS="" + ;; + SCO_SV-3.2*) + # Note, dlopen is available only on SCO 3.2.5 and greater. However, + # this test works, since "uname -s" was non-standard in 3.2.4 and + # below. 
+ AS_IF([test "$GCC" = yes], [ + SHLIB_CFLAGS="-fPIC -melf" + LDFLAGS="$LDFLAGS -melf -Wl,-Bexport" + ], [ + SHLIB_CFLAGS="-Kpic -belf" + LDFLAGS="$LDFLAGS -belf -Wl,-Bexport" + ]) + SHLIB_LD="ld -G" + SHLIB_LD_LIBS="" + SHLIB_SUFFIX=".so" + DL_OBJS="tclLoadDl.o" + DL_LIBS="" + CC_SEARCH_FLAGS="" + LD_SEARCH_FLAGS="" + ;; + SINIX*5.4*) + SHLIB_CFLAGS="-K PIC" + SHLIB_LD='${CC} -G' + SHLIB_LD_LIBS="" + SHLIB_SUFFIX=".so" + DL_OBJS="tclLoadDl.o" + DL_LIBS="-ldl" + CC_SEARCH_FLAGS="" + LD_SEARCH_FLAGS="" + ;; + SunOS-4*) + SHLIB_CFLAGS="-PIC" + SHLIB_LD="ld" + SHLIB_LD_LIBS="" + SHLIB_SUFFIX=".so" + DL_OBJS="tclLoadDl.o" + DL_LIBS="-ldl" + CC_SEARCH_FLAGS='-L${LIB_RUNTIME_DIR}' + LD_SEARCH_FLAGS=${CC_SEARCH_FLAGS} + + # SunOS can't handle version numbers with dots in them in library + # specs, like -ltcl7.5, so use -ltcl75 instead. Also, it + # requires an extra version number at the end of .so file names. + # So, the library has to have a name like libtcl75.so.1.0 + + SHARED_LIB_SUFFIX='${TCL_TRIM_DOTS}.so.${SHLIB_VERSION}' + UNSHARED_LIB_SUFFIX='${TCL_TRIM_DOTS}.a' + TCL_LIB_VERSIONS_OK=nodots + ;; + SunOS-5.[[0-6]]) + # Careful to not let 5.10+ fall into this case + + # Note: If _REENTRANT isn't defined, then Solaris + # won't define thread-safe library routines. + + AC_DEFINE(_REENTRANT, 1, [Do we want the reentrant OS API?]) + AC_DEFINE(_POSIX_PTHREAD_SEMANTICS, 1, + [Do we really want to follow the standard? Yes we do!]) + + SHLIB_CFLAGS="-KPIC" + + # Note: need the LIBS below, otherwise Tk won't find Tcl's + # symbols when dynamically loaded into tclsh. 
+ + SHLIB_LD_LIBS='${LIBS}' + SHLIB_SUFFIX=".so" + DL_OBJS="tclLoadDl.o" + DL_LIBS="-ldl" + AS_IF([test "$GCC" = yes], [ + SHLIB_LD='${CC} -shared' + CC_SEARCH_FLAGS='-Wl,-R,${LIB_RUNTIME_DIR}' + LD_SEARCH_FLAGS=${CC_SEARCH_FLAGS} + ], [ + SHLIB_LD="/usr/ccs/bin/ld -G -z text" + CC_SEARCH_FLAGS='-R ${LIB_RUNTIME_DIR}' + LD_SEARCH_FLAGS=${CC_SEARCH_FLAGS} + ]) + ;; + SunOS-5*) + # Note: If _REENTRANT isn't defined, then Solaris + # won't define thread-safe library routines. + + AC_DEFINE(_REENTRANT, 1, [Do we want the reentrant OS API?]) + AC_DEFINE(_POSIX_PTHREAD_SEMANTICS, 1, + [Do we really want to follow the standard? Yes we do!]) + + SHLIB_CFLAGS="-KPIC" + + # Check to enable 64-bit flags for compiler/linker + AS_IF([test "$do64bit" = yes], [ + arch=`isainfo` + AS_IF([test "$arch" = "sparcv9 sparc"], [ + AS_IF([test "$GCC" = yes], [ + AS_IF([test "`${CC} -dumpversion | awk -F. '{print [$]1}'`" -lt 3], [ + AC_MSG_WARN([64bit mode not supported with GCC < 3.2 on $system]) + ], [ + do64bit_ok=yes + CFLAGS="$CFLAGS -m64 -mcpu=v9" + LDFLAGS="$LDFLAGS -m64 -mcpu=v9" + SHLIB_CFLAGS="-fPIC" + ]) + ], [ + do64bit_ok=yes + AS_IF([test "$do64bitVIS" = yes], [ + CFLAGS="$CFLAGS -xarch=v9a" + LDFLAGS_ARCH="-xarch=v9a" + ], [ + CFLAGS="$CFLAGS -xarch=v9" + LDFLAGS_ARCH="-xarch=v9" + ]) + # Solaris 64 uses this as well + #LD_LIBRARY_PATH_VAR="LD_LIBRARY_PATH_64" + ]) + ], [AS_IF([test "$arch" = "amd64 i386"], [ + AS_IF([test "$GCC" = yes], [ + case $system in + SunOS-5.1[[1-9]]*|SunOS-5.[[2-9]][[0-9]]*) + do64bit_ok=yes + CFLAGS="$CFLAGS -m64" + LDFLAGS="$LDFLAGS -m64";; + *) + AC_MSG_WARN([64bit mode not supported with GCC on $system]);; + esac + ], [ + do64bit_ok=yes + case $system in + SunOS-5.1[[1-9]]*|SunOS-5.[[2-9]][[0-9]]*) + CFLAGS="$CFLAGS -m64" + LDFLAGS="$LDFLAGS -m64";; + *) + CFLAGS="$CFLAGS -xarch=amd64" + LDFLAGS="$LDFLAGS -xarch=amd64";; + esac + ]) + ], [AC_MSG_WARN([64bit mode not supported for $arch])])]) + ]) + + 
#-------------------------------------------------------------------- + # On Solaris 5.x i386 with the sunpro compiler we need to link + # with sunmath to get floating point rounding control + #-------------------------------------------------------------------- + AS_IF([test "$GCC" = yes],[use_sunmath=no],[ + arch=`isainfo` + AC_MSG_CHECKING([whether to use -lsunmath for fp rounding control]) + AS_IF([test "$arch" = "amd64 i386"], [ + AC_MSG_RESULT([yes]) + MATH_LIBS="-lsunmath $MATH_LIBS" + AC_CHECK_HEADER(sunmath.h) + use_sunmath=yes + ], [ + AC_MSG_RESULT([no]) + use_sunmath=no + ]) + ]) + + # Note: need the LIBS below, otherwise Tk won't find Tcl's + # symbols when dynamically loaded into tclsh. + + SHLIB_LD_LIBS='${LIBS}' + SHLIB_SUFFIX=".so" + DL_OBJS="tclLoadDl.o" + DL_LIBS="-ldl" + AS_IF([test "$GCC" = yes], [ + SHLIB_LD='${CC} -shared' + CC_SEARCH_FLAGS='-Wl,-R,${LIB_RUNTIME_DIR}' + LD_SEARCH_FLAGS=${CC_SEARCH_FLAGS} + AS_IF([test "$do64bit_ok" = yes], [ + AS_IF([test "$arch" = "sparcv9 sparc"], [ + # We need to specify -static-libgcc or we need to + # add the path to the sparv9 libgcc. + SHLIB_LD="$SHLIB_LD -m64 -mcpu=v9 -static-libgcc" + # for finding sparcv9 libgcc, get the regular libgcc + # path, remove so name and append 'sparcv9' + #v9gcclibdir="`gcc -print-file-name=libgcc_s.so` | ..." 
+ #CC_SEARCH_FLAGS="${CC_SEARCH_FLAGS},-R,$v9gcclibdir" + ], [AS_IF([test "$arch" = "amd64 i386"], [ + SHLIB_LD="$SHLIB_LD -m64 -static-libgcc" + ])]) + ]) + ], [ + AS_IF([test "$use_sunmath" = yes], [textmode=textoff],[textmode=text]) + case $system in + SunOS-5.[[1-9]][[0-9]]*) + SHLIB_LD="\${CC} -G -z $textmode \${LDFLAGS}";; + *) + SHLIB_LD="/usr/ccs/bin/ld -G -z $textmode";; + esac + CC_SEARCH_FLAGS='-Wl,-R,${LIB_RUNTIME_DIR}' + LD_SEARCH_FLAGS='-R ${LIB_RUNTIME_DIR}' + ]) + ;; + UNIX_SV* | UnixWare-5*) + SHLIB_CFLAGS="-KPIC" + SHLIB_LD='${CC} -G' + SHLIB_LD_LIBS="" + SHLIB_SUFFIX=".so" + DL_OBJS="tclLoadDl.o" + DL_LIBS="-ldl" + # Some UNIX_SV* systems (unixware 1.1.2 for example) have linkers + # that don't grok the -Bexport option. Test that it does. + AC_CACHE_CHECK([for ld accepts -Bexport flag], tcl_cv_ld_Bexport, [ + hold_ldflags=$LDFLAGS + LDFLAGS="$LDFLAGS -Wl,-Bexport" + AC_TRY_LINK(, [int i;], tcl_cv_ld_Bexport=yes, tcl_cv_ld_Bexport=no) + LDFLAGS=$hold_ldflags]) + AS_IF([test $tcl_cv_ld_Bexport = yes], [ + LDFLAGS="$LDFLAGS -Wl,-Bexport" + ]) + CC_SEARCH_FLAGS="" + LD_SEARCH_FLAGS="" + ;; + esac + + AS_IF([test "$do64bit" = yes -a "$do64bit_ok" = no], [ + AC_MSG_WARN([64bit support being disabled -- don't know magic for this platform]) + ]) + + AS_IF([test "$do64bit" = yes -a "$do64bit_ok" = yes], [ + AC_DEFINE(TCL_CFG_DO64BIT, 1, [Is this a 64-bit build?]) + ]) + +dnl # Add any CPPFLAGS set in the environment to our CFLAGS, but delay doing so +dnl # until the end of configure, as configure's compile and link tests use +dnl # both CPPFLAGS and CFLAGS (unlike our compile and link) but configure's +dnl # preprocessing tests use only CPPFLAGS. + AC_CONFIG_COMMANDS_PRE([CFLAGS="${CFLAGS} ${CPPFLAGS}"; CPPFLAGS=""]) + + # Step 4: disable dynamic loading if requested via a command-line switch. 
+ + AC_ARG_ENABLE(load, + AC_HELP_STRING([--enable-load], + [allow dynamic loading and "load" command (default: on)]), + [tcl_ok=$enableval], [tcl_ok=yes]) + AS_IF([test "$tcl_ok" = no], [DL_OBJS=""]) + + AS_IF([test "x$DL_OBJS" != x], [BUILD_DLTEST="\$(DLTEST_TARGETS)"], [ + AC_MSG_WARN([Can't figure out how to do dynamic loading or shared libraries on this system.]) + SHLIB_CFLAGS="" + SHLIB_LD="" + SHLIB_SUFFIX="" + DL_OBJS="tclLoadNone.o" + DL_LIBS="" + LDFLAGS="$LDFLAGS_ORIG" + CC_SEARCH_FLAGS="" + LD_SEARCH_FLAGS="" + BUILD_DLTEST="" + ]) + LDFLAGS="$LDFLAGS $LDFLAGS_ARCH" + + # If we're running gcc, then change the C flags for compiling shared + # libraries to the right flags for gcc, instead of those for the + # standard manufacturer compiler. + + AS_IF([test "$DL_OBJS" != "tclLoadNone.o" -a "$GCC" = yes], [ + case $system in + AIX-*) ;; + BSD/OS*) ;; + IRIX*) ;; + NetBSD-*|FreeBSD-*) ;; + Darwin-*) ;; + SCO_SV-3.2*) ;; + *) SHLIB_CFLAGS="-fPIC" ;; + esac]) + + AS_IF([test "$SHARED_LIB_SUFFIX" = ""], [ + SHARED_LIB_SUFFIX='${VERSION}${SHLIB_SUFFIX}']) + AS_IF([test "$UNSHARED_LIB_SUFFIX" = ""], [ + UNSHARED_LIB_SUFFIX='${VERSION}.a']) + + AS_IF([test "${SHARED_BUILD}" = 1 -a "${SHLIB_SUFFIX}" != ""], [ + LIB_SUFFIX=${SHARED_LIB_SUFFIX} + MAKE_LIB='${SHLIB_LD} -o [$]@ ${OBJS} ${SHLIB_LD_LIBS} ${TCL_SHLIB_LD_EXTRAS} ${TK_SHLIB_LD_EXTRAS} ${LD_SEARCH_FLAGS}' + INSTALL_LIB='$(INSTALL_LIBRARY) $(LIB_FILE) "$(LIB_INSTALL_DIR)"/$(LIB_FILE)' + ], [ + LIB_SUFFIX=${UNSHARED_LIB_SUFFIX} + + AS_IF([test "$RANLIB" = ""], [ + MAKE_LIB='$(STLIB_LD) [$]@ ${OBJS}' + INSTALL_LIB='$(INSTALL_LIBRARY) $(LIB_FILE) "$(LIB_INSTALL_DIR)"/$(LIB_FILE)' + ], [ + MAKE_LIB='${STLIB_LD} [$]@ ${OBJS} ; ${RANLIB} [$]@' + INSTALL_LIB='$(INSTALL_LIBRARY) $(LIB_FILE) "$(LIB_INSTALL_DIR)"/$(LIB_FILE) ; (cd "$(LIB_INSTALL_DIR)" ; $(RANLIB) $(LIB_FILE))' + ]) + ]) + + # Stub lib does not depend on shared/static configuration + AS_IF([test "$RANLIB" = ""], [ + MAKE_STUB_LIB='${STLIB_LD} [$]@ 
${STUB_LIB_OBJS}' + INSTALL_STUB_LIB='$(INSTALL_LIBRARY) $(STUB_LIB_FILE) "$(LIB_INSTALL_DIR)/$(STUB_LIB_FILE)"' + ], [ + MAKE_STUB_LIB='${STLIB_LD} [$]@ ${STUB_LIB_OBJS} ; ${RANLIB} [$]@' + INSTALL_STUB_LIB='$(INSTALL_LIBRARY) $(STUB_LIB_FILE) "$(LIB_INSTALL_DIR)"/$(STUB_LIB_FILE) ; (cd "$(LIB_INSTALL_DIR)" ; $(RANLIB) $(STUB_LIB_FILE))' + ]) + + # Define TCL_LIBS now that we know what DL_LIBS is. + # The trick here is that we don't want to change the value of TCL_LIBS if + # it is already set when tclConfig.sh had been loaded by Tk. + AS_IF([test "x${TCL_LIBS}" = x], [ + TCL_LIBS="${DL_LIBS} ${LIBS} ${MATH_LIBS}"]) + AC_SUBST(TCL_LIBS) + + # FIXME: This subst was left in only because the TCL_DL_LIBS + # entry in tclConfig.sh uses it. It is not clear why someone + # would use TCL_DL_LIBS instead of TCL_LIBS. + AC_SUBST(DL_LIBS) + + AC_SUBST(DL_OBJS) + AC_SUBST(PLAT_OBJS) + AC_SUBST(PLAT_SRCS) + AC_SUBST(LDAIX_SRC) + AC_SUBST(CFLAGS) + AC_SUBST(CFLAGS_DEBUG) + AC_SUBST(CFLAGS_OPTIMIZE) + AC_SUBST(CFLAGS_WARNING) + + AC_SUBST(LDFLAGS) + AC_SUBST(LDFLAGS_DEBUG) + AC_SUBST(LDFLAGS_OPTIMIZE) + AC_SUBST(CC_SEARCH_FLAGS) + AC_SUBST(LD_SEARCH_FLAGS) + + AC_SUBST(STLIB_LD) + AC_SUBST(SHLIB_LD) + AC_SUBST(TCL_SHLIB_LD_EXTRAS) + AC_SUBST(TK_SHLIB_LD_EXTRAS) + AC_SUBST(SHLIB_LD_LIBS) + AC_SUBST(SHLIB_CFLAGS) + AC_SUBST(SHLIB_SUFFIX) + AC_DEFINE_UNQUOTED(TCL_SHLIB_EXT,"${SHLIB_SUFFIX}", + [What is the default extension for shared libraries?]) + + AC_SUBST(MAKE_LIB) + AC_SUBST(MAKE_STUB_LIB) + AC_SUBST(INSTALL_LIB) + AC_SUBST(INSTALL_STUB_LIB) + AC_SUBST(RANLIB) +]) + +#-------------------------------------------------------------------- +# SC_SERIAL_PORT +# +# Determine which interface to use to talk to the serial port. +# Note that #include lines must begin in leftmost column for +# some compilers to recognize them as preprocessor directives, +# and some build environments have stdin not pointing at a +# pseudo-terminal (usually /dev/null instead.) 
+# +# Arguments: +# none +# +# Results: +# +# Defines only one of the following vars: +# HAVE_SYS_MODEM_H +# USE_TERMIOS +# USE_TERMIO +# USE_SGTTY +# +#-------------------------------------------------------------------- + +AC_DEFUN([SC_SERIAL_PORT], [ + AC_CHECK_HEADERS(sys/modem.h) + AC_CACHE_CHECK([termios vs. termio vs. sgtty], tcl_cv_api_serial, [ + AC_TRY_RUN([ +#include + +int main() { + struct termios t; + if (tcgetattr(0, &t) == 0) { + cfsetospeed(&t, 0); + t.c_cflag |= PARENB | PARODD | CSIZE | CSTOPB; + return 0; + } + return 1; +}], tcl_cv_api_serial=termios, tcl_cv_api_serial=no, tcl_cv_api_serial=no) + if test $tcl_cv_api_serial = no ; then + AC_TRY_RUN([ +#include + +int main() { + struct termio t; + if (ioctl(0, TCGETA, &t) == 0) { + t.c_cflag |= CBAUD | PARENB | PARODD | CSIZE | CSTOPB; + return 0; + } + return 1; +}], tcl_cv_api_serial=termio, tcl_cv_api_serial=no, tcl_cv_api_serial=no) + fi + if test $tcl_cv_api_serial = no ; then + AC_TRY_RUN([ +#include + +int main() { + struct sgttyb t; + if (ioctl(0, TIOCGETP, &t) == 0) { + t.sg_ospeed = 0; + t.sg_flags |= ODDP | EVENP | RAW; + return 0; + } + return 1; +}], tcl_cv_api_serial=sgtty, tcl_cv_api_serial=no, tcl_cv_api_serial=no) + fi + if test $tcl_cv_api_serial = no ; then + AC_TRY_RUN([ +#include +#include + +int main() { + struct termios t; + if (tcgetattr(0, &t) == 0 + || errno == ENOTTY || errno == ENXIO || errno == EINVAL) { + cfsetospeed(&t, 0); + t.c_cflag |= PARENB | PARODD | CSIZE | CSTOPB; + return 0; + } + return 1; +}], tcl_cv_api_serial=termios, tcl_cv_api_serial=no, tcl_cv_api_serial=no) + fi + if test $tcl_cv_api_serial = no; then + AC_TRY_RUN([ +#include +#include + +int main() { + struct termio t; + if (ioctl(0, TCGETA, &t) == 0 + || errno == ENOTTY || errno == ENXIO || errno == EINVAL) { + t.c_cflag |= CBAUD | PARENB | PARODD | CSIZE | CSTOPB; + return 0; + } + return 1; + }], tcl_cv_api_serial=termio, tcl_cv_api_serial=no, tcl_cv_api_serial=no) + fi + if test 
$tcl_cv_api_serial = no; then + AC_TRY_RUN([ +#include +#include + +int main() { + struct sgttyb t; + if (ioctl(0, TIOCGETP, &t) == 0 + || errno == ENOTTY || errno == ENXIO || errno == EINVAL) { + t.sg_ospeed = 0; + t.sg_flags |= ODDP | EVENP | RAW; + return 0; + } + return 1; +}], tcl_cv_api_serial=sgtty, tcl_cv_api_serial=none, tcl_cv_api_serial=none) + fi]) + case $tcl_cv_api_serial in + termios) AC_DEFINE(USE_TERMIOS, 1, [Use the termios API for serial lines]);; + termio) AC_DEFINE(USE_TERMIO, 1, [Use the termio API for serial lines]);; + sgtty) AC_DEFINE(USE_SGTTY, 1, [Use the sgtty API for serial lines]);; + esac +]) + +#-------------------------------------------------------------------- +# SC_MISSING_POSIX_HEADERS +# +# Supply substitutes for missing POSIX header files. Special +# notes: +# - stdlib.h doesn't define strtol, strtoul, or +# strtod insome versions of SunOS +# - some versions of string.h don't declare procedures such +# as strstr +# +# Arguments: +# none +# +# Results: +# +# Defines some of the following vars: +# NO_DIRENT_H +# NO_VALUES_H +# HAVE_LIMITS_H or NO_LIMITS_H +# NO_STDLIB_H +# NO_STRING_H +# NO_SYS_WAIT_H +# NO_DLFCN_H +# HAVE_SYS_PARAM_H +# +# HAVE_STRING_H ? +# +#-------------------------------------------------------------------- + +AC_DEFUN([SC_MISSING_POSIX_HEADERS], [ + AC_CACHE_CHECK([dirent.h], tcl_cv_dirent_h, [ + AC_TRY_LINK([#include +#include ], [ +#ifndef _POSIX_SOURCE +# ifdef __Lynx__ + /* + * Generate compilation error to make the test fail: Lynx headers + * are only valid if really in the POSIX environment. 
+ */ + + missing_procedure(); +# endif +#endif +DIR *d; +struct dirent *entryPtr; +char *p; +d = opendir("foobar"); +entryPtr = readdir(d); +p = entryPtr->d_name; +closedir(d); +], tcl_cv_dirent_h=yes, tcl_cv_dirent_h=no)]) + + if test $tcl_cv_dirent_h = no; then + AC_DEFINE(NO_DIRENT_H, 1, [Do we have ?]) + fi + + AC_CHECK_HEADER(float.h, , [AC_DEFINE(NO_FLOAT_H, 1, [Do we have ?])]) + AC_CHECK_HEADER(values.h, , [AC_DEFINE(NO_VALUES_H, 1, [Do we have ?])]) + AC_CHECK_HEADER(limits.h, + [AC_DEFINE(HAVE_LIMITS_H, 1, [Do we have ?])], + [AC_DEFINE(NO_LIMITS_H, 1, [Do we have ?])]) + AC_CHECK_HEADER(stdlib.h, tcl_ok=1, tcl_ok=0) + AC_EGREP_HEADER(strtol, stdlib.h, , tcl_ok=0) + AC_EGREP_HEADER(strtoul, stdlib.h, , tcl_ok=0) + AC_EGREP_HEADER(strtod, stdlib.h, , tcl_ok=0) + if test $tcl_ok = 0; then + AC_DEFINE(NO_STDLIB_H, 1, [Do we have ?]) + fi + AC_CHECK_HEADER(string.h, tcl_ok=1, tcl_ok=0) + AC_EGREP_HEADER(strstr, string.h, , tcl_ok=0) + AC_EGREP_HEADER(strerror, string.h, , tcl_ok=0) + + # See also memmove check below for a place where NO_STRING_H can be + # set and why. + + if test $tcl_ok = 0; then + AC_DEFINE(NO_STRING_H, 1, [Do we have ?]) + fi + + AC_CHECK_HEADER(sys/wait.h, , [AC_DEFINE(NO_SYS_WAIT_H, 1, [Do we have ?])]) + AC_CHECK_HEADER(dlfcn.h, , [AC_DEFINE(NO_DLFCN_H, 1, [Do we have ?])]) + + # OS/390 lacks sys/param.h (and doesn't need it, by chance). + AC_HAVE_HEADERS(sys/param.h) +]) + +#-------------------------------------------------------------------- +# SC_PATH_X +# +# Locate the X11 header files and the X11 library archive. Try +# the ac_path_x macro first, but if it doesn't find the X stuff +# (e.g. because there's no xmkmf program) then check through +# a list of possible directories. Under some conditions the +# autoconf macro will return an include directory that contains +# no include files, so double-check its result just to be safe. 
+# +# Arguments: +# none +# +# Results: +# +# Sets the the following vars: +# XINCLUDES +# XLIBSW +# +#-------------------------------------------------------------------- + +AC_DEFUN([SC_PATH_X], [ + AC_PATH_X + not_really_there="" + if test "$no_x" = ""; then + if test "$x_includes" = ""; then + AC_TRY_CPP([#include ], , not_really_there="yes") + else + if test ! -r $x_includes/X11/Intrinsic.h; then + not_really_there="yes" + fi + fi + fi + if test "$no_x" = "yes" -o "$not_really_there" = "yes"; then + AC_MSG_CHECKING([for X11 header files]) + found_xincludes="no" + AC_TRY_CPP([#include ], found_xincludes="yes", found_xincludes="no") + if test "$found_xincludes" = "no"; then + dirs="/usr/unsupported/include /usr/local/include /usr/X386/include /usr/X11R6/include /usr/X11R5/include /usr/include/X11R5 /usr/include/X11R4 /usr/openwin/include /usr/X11/include /usr/sww/include" + for i in $dirs ; do + if test -r $i/X11/Intrinsic.h; then + AC_MSG_RESULT([$i]) + XINCLUDES=" -I$i" + found_xincludes="yes" + break + fi + done + fi + else + if test "$x_includes" != ""; then + XINCLUDES="-I$x_includes" + found_xincludes="yes" + fi + fi + if test found_xincludes = "no"; then + AC_MSG_RESULT([couldn't find any!]) + fi + + if test "$no_x" = yes; then + AC_MSG_CHECKING([for X11 libraries]) + XLIBSW=nope + dirs="/usr/unsupported/lib /usr/local/lib /usr/X386/lib /usr/X11R6/lib /usr/X11R5/lib /usr/lib/X11R5 /usr/lib/X11R4 /usr/openwin/lib /usr/X11/lib /usr/sww/X11/lib" + for i in $dirs ; do + if test -r $i/libX11.a -o -r $i/libX11.so -o -r $i/libX11.sl -o -r $i/libX11.dylib; then + AC_MSG_RESULT([$i]) + XLIBSW="-L$i -lX11" + x_libraries="$i" + break + fi + done + else + if test "$x_libraries" = ""; then + XLIBSW=-lX11 + else + XLIBSW="-L$x_libraries -lX11" + fi + fi + if test "$XLIBSW" = nope ; then + AC_CHECK_LIB(Xwindow, XCreateWindow, XLIBSW=-lXwindow) + fi + if test "$XLIBSW" = nope ; then + AC_MSG_RESULT([could not find any! 
Using -lX11.]) + XLIBSW=-lX11 + fi +]) + +#-------------------------------------------------------------------- +# SC_BLOCKING_STYLE +# +# The statements below check for systems where POSIX-style +# non-blocking I/O (O_NONBLOCK) doesn't work or is unimplemented. +# On these systems (mostly older ones), use the old BSD-style +# FIONBIO approach instead. +# +# Arguments: +# none +# +# Results: +# +# Defines some of the following vars: +# HAVE_SYS_IOCTL_H +# HAVE_SYS_FILIO_H +# USE_FIONBIO +# O_NONBLOCK +# +#-------------------------------------------------------------------- + +AC_DEFUN([SC_BLOCKING_STYLE], [ + AC_CHECK_HEADERS(sys/ioctl.h) + AC_CHECK_HEADERS(sys/filio.h) + SC_CONFIG_SYSTEM + AC_MSG_CHECKING([FIONBIO vs. O_NONBLOCK for nonblocking I/O]) + case $system in + # There used to be code here to use FIONBIO under AIX. However, it + # was reported that FIONBIO doesn't work under AIX 3.2.5. Since + # using O_NONBLOCK seems fine under AIX 4.*, I removed the FIONBIO + # code (JO, 5/31/97). + + OSF*) + AC_DEFINE(USE_FIONBIO, 1, [Should we use FIONBIO?]) + AC_MSG_RESULT([FIONBIO]) + ;; + SunOS-4*) + AC_DEFINE(USE_FIONBIO, 1, [Should we use FIONBIO?]) + AC_MSG_RESULT([FIONBIO]) + ;; + *) + AC_MSG_RESULT([O_NONBLOCK]) + ;; + esac +]) + +#-------------------------------------------------------------------- +# SC_TIME_HANLDER +# +# Checks how the system deals with time.h, what time structures +# are used on the system, and what fields the structures have. 
+# +# Arguments: +# none +# +# Results: +# +# Defines some of the following vars: +# USE_DELTA_FOR_TZ +# HAVE_TM_GMTOFF +# HAVE_TM_TZADJ +# HAVE_TIMEZONE_VAR +# +#-------------------------------------------------------------------- + +AC_DEFUN([SC_TIME_HANDLER], [ + AC_CHECK_HEADERS(sys/time.h) + AC_HEADER_TIME + AC_STRUCT_TIMEZONE + + AC_CHECK_FUNCS(gmtime_r localtime_r mktime) + + AC_CACHE_CHECK([tm_tzadj in struct tm], tcl_cv_member_tm_tzadj, [ + AC_TRY_COMPILE([#include ], [struct tm tm; tm.tm_tzadj;], + tcl_cv_member_tm_tzadj=yes, tcl_cv_member_tm_tzadj=no)]) + if test $tcl_cv_member_tm_tzadj = yes ; then + AC_DEFINE(HAVE_TM_TZADJ, 1, [Should we use the tm_tzadj field of struct tm?]) + fi + + AC_CACHE_CHECK([tm_gmtoff in struct tm], tcl_cv_member_tm_gmtoff, [ + AC_TRY_COMPILE([#include ], [struct tm tm; tm.tm_gmtoff;], + tcl_cv_member_tm_gmtoff=yes, tcl_cv_member_tm_gmtoff=no)]) + if test $tcl_cv_member_tm_gmtoff = yes ; then + AC_DEFINE(HAVE_TM_GMTOFF, 1, [Should we use the tm_gmtoff field of struct tm?]) + fi + + # + # Its important to include time.h in this check, as some systems + # (like convex) have timezone functions, etc. + # + AC_CACHE_CHECK([long timezone variable], tcl_cv_timezone_long, [ + AC_TRY_COMPILE([#include ], + [extern long timezone; + timezone += 1; + exit (0);], + tcl_cv_timezone_long=yes, tcl_cv_timezone_long=no)]) + if test $tcl_cv_timezone_long = yes ; then + AC_DEFINE(HAVE_TIMEZONE_VAR, 1, [Should we use the global timezone variable?]) + else + # + # On some systems (eg IRIX 6.2), timezone is a time_t and not a long. 
+ # + AC_CACHE_CHECK([time_t timezone variable], tcl_cv_timezone_time, [ + AC_TRY_COMPILE([#include ], + [extern time_t timezone; + timezone += 1; + exit (0);], + tcl_cv_timezone_time=yes, tcl_cv_timezone_time=no)]) + if test $tcl_cv_timezone_time = yes ; then + AC_DEFINE(HAVE_TIMEZONE_VAR, 1, [Should we use the global timezone variable?]) + fi + fi +]) + +#-------------------------------------------------------------------- +# SC_BUGGY_STRTOD +# +# Under Solaris 2.4, strtod returns the wrong value for the +# terminating character under some conditions. Check for this +# and if the problem exists use a substitute procedure +# "fixstrtod" (provided by Tcl) that corrects the error. +# Also, on Compaq's Tru64 Unix 5.0, +# strtod(" ") returns 0.0 instead of a failure to convert. +# +# Arguments: +# none +# +# Results: +# +# Might defines some of the following vars: +# strtod (=fixstrtod) +# +#-------------------------------------------------------------------- + +AC_DEFUN([SC_BUGGY_STRTOD], [ + AC_CHECK_FUNC(strtod, tcl_strtod=1, tcl_strtod=0) + if test "$tcl_strtod" = 1; then + AC_CACHE_CHECK([for Solaris2.4/Tru64 strtod bugs], tcl_cv_strtod_buggy,[ + AC_TRY_RUN([ + extern double strtod(); + int main() { + char *infString="Inf", *nanString="NaN", *spaceString=" "; + char *term; + double value; + value = strtod(infString, &term); + if ((term != infString) && (term[-1] == 0)) { + exit(1); + } + value = strtod(nanString, &term); + if ((term != nanString) && (term[-1] == 0)) { + exit(1); + } + value = strtod(spaceString, &term); + if (term == (spaceString+1)) { + exit(1); + } + exit(0); + }], tcl_cv_strtod_buggy=ok, tcl_cv_strtod_buggy=buggy, + tcl_cv_strtod_buggy=buggy)]) + if test "$tcl_cv_strtod_buggy" = buggy; then + AC_LIBOBJ([fixstrtod]) + USE_COMPAT=1 + AC_DEFINE(strtod, fixstrtod, [Do we want to use the strtod() in compat?]) + fi + fi +]) + +#-------------------------------------------------------------------- +# SC_TCL_LINK_LIBS +# +# Search for the libraries 
needed to link the Tcl shell. +# Things like the math library (-lm) and socket stuff (-lsocket vs. +# -lnsl) are dealt with here. +# +# Arguments: +# None. +# +# Results: +# +# Might append to the following vars: +# LIBS +# MATH_LIBS +# +# Might define the following vars: +# HAVE_NET_ERRNO_H +# +#-------------------------------------------------------------------- + +AC_DEFUN([SC_TCL_LINK_LIBS], [ + #-------------------------------------------------------------------- + # On a few very rare systems, all of the libm.a stuff is + # already in libc.a. Set compiler flags accordingly. + # Also, Linux requires the "ieee" library for math to work + # right (and it must appear before "-lm"). + #-------------------------------------------------------------------- + + AC_CHECK_FUNC(sin, MATH_LIBS="", MATH_LIBS="-lm") + AC_CHECK_LIB(ieee, main, [MATH_LIBS="-lieee $MATH_LIBS"]) + + #-------------------------------------------------------------------- + # Interactive UNIX requires -linet instead of -lsocket, plus it + # needs net/errno.h to define the socket-related error codes. + #-------------------------------------------------------------------- + + AC_CHECK_LIB(inet, main, [LIBS="$LIBS -linet"]) + AC_CHECK_HEADER(net/errno.h, [ + AC_DEFINE(HAVE_NET_ERRNO_H, 1, [Do we have ?])]) + + #-------------------------------------------------------------------- + # Check for the existence of the -lsocket and -lnsl libraries. + # The order here is important, so that they end up in the right + # order in the command line generated by make. Here are some + # special considerations: + # 1. Use "connect" and "accept" to check for -lsocket, and + # "gethostbyname" to check for -lnsl. + # 2. Use each function name only once: can't redo a check because + # autoconf caches the results of the last check and won't redo it. + # 3. Use -lnsl and -lsocket only if they supply procedures that + # aren't already present in the normal libraries. 
This is because + # IRIX 5.2 has libraries, but they aren't needed and they're + # bogus: they goof up name resolution if used. + # 4. On some SVR4 systems, can't use -lsocket without -lnsl too. + # To get around this problem, check for both libraries together + # if -lsocket doesn't work by itself. + #-------------------------------------------------------------------- + + tcl_checkBoth=0 + AC_CHECK_FUNC(connect, tcl_checkSocket=0, tcl_checkSocket=1) + if test "$tcl_checkSocket" = 1; then + AC_CHECK_FUNC(setsockopt, , [AC_CHECK_LIB(socket, setsockopt, + LIBS="$LIBS -lsocket", tcl_checkBoth=1)]) + fi + if test "$tcl_checkBoth" = 1; then + tk_oldLibs=$LIBS + LIBS="$LIBS -lsocket -lnsl" + AC_CHECK_FUNC(accept, tcl_checkNsl=0, [LIBS=$tk_oldLibs]) + fi + AC_CHECK_FUNC(gethostbyname, , [AC_CHECK_LIB(nsl, gethostbyname, + [LIBS="$LIBS -lnsl"])]) +]) + +#-------------------------------------------------------------------- +# SC_TCL_EARLY_FLAGS +# +# Check for what flags are needed to be passed so the correct OS +# features are available. 
+# +# Arguments: +# None +# +# Results: +# +# Might define the following vars: +# _ISOC99_SOURCE +# _LARGEFILE64_SOURCE +# _LARGEFILE_SOURCE64 +# +#-------------------------------------------------------------------- + +AC_DEFUN([SC_TCL_EARLY_FLAG],[ + AC_CACHE_VAL([tcl_cv_flag_]translit($1,[A-Z],[a-z]), + AC_TRY_COMPILE([$2], $3, [tcl_cv_flag_]translit($1,[A-Z],[a-z])=no, + AC_TRY_COMPILE([[#define ]$1[ 1 +]$2], $3, + [tcl_cv_flag_]translit($1,[A-Z],[a-z])=yes, + [tcl_cv_flag_]translit($1,[A-Z],[a-z])=no))) + if test ["x${tcl_cv_flag_]translit($1,[A-Z],[a-z])[}" = "xyes"] ; then + AC_DEFINE($1, 1, [Add the ]$1[ flag when building]) + tcl_flags="$tcl_flags $1" + fi +]) + +AC_DEFUN([SC_TCL_EARLY_FLAGS],[ + AC_MSG_CHECKING([for required early compiler flags]) + tcl_flags="" + SC_TCL_EARLY_FLAG(_ISOC99_SOURCE,[#include ], + [char *p = (char *)strtoll; char *q = (char *)strtoull;]) + SC_TCL_EARLY_FLAG(_LARGEFILE64_SOURCE,[#include ], + [struct stat64 buf; int i = stat64("/", &buf);]) + SC_TCL_EARLY_FLAG(_LARGEFILE_SOURCE64,[#include ], + [char *p = (char *)open64;]) + if test "x${tcl_flags}" = "x" ; then + AC_MSG_RESULT([none]) + else + AC_MSG_RESULT([${tcl_flags}]) + fi +]) + +#-------------------------------------------------------------------- +# SC_TCL_64BIT_FLAGS +# +# Check for what is defined in the way of 64-bit features. 
+# +# Arguments: +# None +# +# Results: +# +# Might define the following vars: +# TCL_WIDE_INT_IS_LONG +# TCL_WIDE_INT_TYPE +# HAVE_STRUCT_DIRENT64 +# HAVE_STRUCT_STAT64 +# HAVE_TYPE_OFF64_T +# +#-------------------------------------------------------------------- + +AC_DEFUN([SC_TCL_64BIT_FLAGS], [ + AC_MSG_CHECKING([for 64-bit integer type]) + AC_CACHE_VAL(tcl_cv_type_64bit,[ + tcl_cv_type_64bit=none + # See if the compiler knows natively about __int64 + AC_TRY_COMPILE(,[__int64 value = (__int64) 0;], + tcl_type_64bit=__int64, tcl_type_64bit="long long") + # See if we should use long anyway Note that we substitute in the + # type that is our current guess for a 64-bit type inside this check + # program, so it should be modified only carefully... + AC_TRY_COMPILE(,[switch (0) { + case 1: case (sizeof(]${tcl_type_64bit}[)==sizeof(long)): ; + }],tcl_cv_type_64bit=${tcl_type_64bit})]) + if test "${tcl_cv_type_64bit}" = none ; then + AC_DEFINE(TCL_WIDE_INT_IS_LONG, 1, [Are wide integers to be implemented with C 'long's?]) + AC_MSG_RESULT([using long]) + else + AC_DEFINE_UNQUOTED(TCL_WIDE_INT_TYPE,${tcl_cv_type_64bit}, + [What type should be used to define wide integers?]) + AC_MSG_RESULT([${tcl_cv_type_64bit}]) + + # Now check for auxiliary declarations + AC_CACHE_CHECK([for struct dirent64], tcl_cv_struct_dirent64,[ + AC_TRY_COMPILE([#include +#include ],[struct dirent64 p;], + tcl_cv_struct_dirent64=yes,tcl_cv_struct_dirent64=no)]) + if test "x${tcl_cv_struct_dirent64}" = "xyes" ; then + AC_DEFINE(HAVE_STRUCT_DIRENT64, 1, [Is 'struct dirent64' in ?]) + fi + + AC_CACHE_CHECK([for struct stat64], tcl_cv_struct_stat64,[ + AC_TRY_COMPILE([#include ],[struct stat64 p; +], + tcl_cv_struct_stat64=yes,tcl_cv_struct_stat64=no)]) + if test "x${tcl_cv_struct_stat64}" = "xyes" ; then + AC_DEFINE(HAVE_STRUCT_STAT64, 1, [Is 'struct stat64' in ?]) + fi + + AC_CHECK_FUNCS(open64 lseek64) + AC_MSG_CHECKING([for off64_t]) + AC_CACHE_VAL(tcl_cv_type_off64_t,[ + 
AC_TRY_COMPILE([#include ],[off64_t offset; +], + tcl_cv_type_off64_t=yes,tcl_cv_type_off64_t=no)]) + dnl Define HAVE_TYPE_OFF64_T only when the off64_t type and the + dnl functions lseek64 and open64 are defined. + if test "x${tcl_cv_type_off64_t}" = "xyes" && \ + test "x${ac_cv_func_lseek64}" = "xyes" && \ + test "x${ac_cv_func_open64}" = "xyes" ; then + AC_DEFINE(HAVE_TYPE_OFF64_T, 1, [Is off64_t in ?]) + AC_MSG_RESULT([yes]) + else + AC_MSG_RESULT([no]) + fi + fi +]) + +#-------------------------------------------------------------------- +# SC_TCL_CFG_ENCODING TIP #59 +# +# Declare the encoding to use for embedded configuration information. +# +# Arguments: +# None. +# +# Results: +# Might append to the following vars: +# DEFS (implicit) +# +# Will define the following vars: +# TCL_CFGVAL_ENCODING +# +#-------------------------------------------------------------------- + +AC_DEFUN([SC_TCL_CFG_ENCODING], [ + AC_ARG_WITH(encoding, + AC_HELP_STRING([--with-encoding], + [encoding for configuration values (default: iso8859-1)]), + with_tcencoding=${withval}) + + if test x"${with_tcencoding}" != x ; then + AC_DEFINE_UNQUOTED(TCL_CFGVAL_ENCODING,"${with_tcencoding}", + [What encoding should be used for embedded configuration info?]) + else + AC_DEFINE(TCL_CFGVAL_ENCODING,"iso8859-1", + [What encoding should be used for embedded configuration info?]) + fi +]) + +#-------------------------------------------------------------------- +# SC_TCL_CHECK_BROKEN_FUNC +# +# Check for broken function. +# +# Arguments: +# funcName - function to test for +# advancedTest - the advanced test to run if the function is present +# +# Results: +# Might cause compatability versions of the function to be used. 
+# Might affect the following vars: +# USE_COMPAT (implicit) +# +#-------------------------------------------------------------------- + +AC_DEFUN([SC_TCL_CHECK_BROKEN_FUNC],[ + AC_CHECK_FUNC($1, tcl_ok=1, tcl_ok=0) + if test ["$tcl_ok"] = 1; then + AC_CACHE_CHECK([proper ]$1[ implementation], [tcl_cv_]$1[_unbroken], + AC_TRY_RUN([[int main() {]$2[}]],[tcl_cv_]$1[_unbroken]=ok, + [tcl_cv_]$1[_unbroken]=broken,[tcl_cv_]$1[_unbroken]=unknown)) + if test ["$tcl_cv_]$1[_unbroken"] = "ok"; then + tcl_ok=1 + else + tcl_ok=0 + fi + fi + if test ["$tcl_ok"] = 0; then + AC_LIBOBJ($1) + USE_COMPAT=1 + fi +]) + +#-------------------------------------------------------------------- +# SC_TCL_GETHOSTBYADDR_R +# +# Check if we have MT-safe variant of gethostbyaddr(). +# +# Arguments: +# None +# +# Results: +# +# Might define the following vars: +# HAVE_GETHOSTBYADDR_R +# HAVE_GETHOSTBYADDR_R_7 +# HAVE_GETHOSTBYADDR_R_8 +# +#-------------------------------------------------------------------- + +AC_DEFUN([SC_TCL_GETHOSTBYADDR_R], [AC_CHECK_FUNC(gethostbyaddr_r, [ + AC_CACHE_CHECK([for gethostbyaddr_r with 7 args], tcl_cv_api_gethostbyaddr_r_7, [ + AC_TRY_COMPILE([ + #include + ], [ + char *addr; + int length; + int type; + struct hostent *result; + char buffer[2048]; + int buflen = 2048; + int h_errnop; + + (void) gethostbyaddr_r(addr, length, type, result, buffer, buflen, + &h_errnop); + ], tcl_cv_api_gethostbyaddr_r_7=yes, tcl_cv_api_gethostbyaddr_r_7=no)]) + tcl_ok=$tcl_cv_api_gethostbyaddr_r_7 + if test "$tcl_ok" = yes; then + AC_DEFINE(HAVE_GETHOSTBYADDR_R_7, 1, + [Define to 1 if gethostbyaddr_r takes 7 args.]) + else + AC_CACHE_CHECK([for gethostbyaddr_r with 8 args], tcl_cv_api_gethostbyaddr_r_8, [ + AC_TRY_COMPILE([ + #include + ], [ + char *addr; + int length; + int type; + struct hostent *result, *resultp; + char buffer[2048]; + int buflen = 2048; + int h_errnop; + + (void) gethostbyaddr_r(addr, length, type, result, buffer, buflen, + &resultp, &h_errnop); + ], 
tcl_cv_api_gethostbyaddr_r_8=yes, tcl_cv_api_gethostbyaddr_r_8=no)]) + tcl_ok=$tcl_cv_api_gethostbyaddr_r_8 + if test "$tcl_ok" = yes; then + AC_DEFINE(HAVE_GETHOSTBYADDR_R_8, 1, + [Define to 1 if gethostbyaddr_r takes 8 args.]) + fi + fi + if test "$tcl_ok" = yes; then + AC_DEFINE(HAVE_GETHOSTBYADDR_R, 1, + [Define to 1 if gethostbyaddr_r is available.]) + fi +])]) + +#-------------------------------------------------------------------- +# SC_TCL_GETHOSTBYNAME_R +# +# Check to see what variant of gethostbyname_r() we have. +# Based on David Arnold's example from the comp.programming.threads +# FAQ Q213 +# +# Arguments: +# None +# +# Results: +# +# Might define the following vars: +# HAVE_GETHOSTBYADDR_R +# HAVE_GETHOSTBYADDR_R_3 +# HAVE_GETHOSTBYADDR_R_5 +# HAVE_GETHOSTBYADDR_R_6 +# +#-------------------------------------------------------------------- + +AC_DEFUN([SC_TCL_GETHOSTBYNAME_R], [AC_CHECK_FUNC(gethostbyname_r, [ + AC_CACHE_CHECK([for gethostbyname_r with 6 args], tcl_cv_api_gethostbyname_r_6, [ + AC_TRY_COMPILE([ + #include + ], [ + char *name; + struct hostent *he, *res; + char buffer[2048]; + int buflen = 2048; + int h_errnop; + + (void) gethostbyname_r(name, he, buffer, buflen, &res, &h_errnop); + ], tcl_cv_api_gethostbyname_r_6=yes, tcl_cv_api_gethostbyname_r_6=no)]) + tcl_ok=$tcl_cv_api_gethostbyname_r_6 + if test "$tcl_ok" = yes; then + AC_DEFINE(HAVE_GETHOSTBYNAME_R_6, 1, + [Define to 1 if gethostbyname_r takes 6 args.]) + else + AC_CACHE_CHECK([for gethostbyname_r with 5 args], tcl_cv_api_gethostbyname_r_5, [ + AC_TRY_COMPILE([ + #include + ], [ + char *name; + struct hostent *he; + char buffer[2048]; + int buflen = 2048; + int h_errnop; + + (void) gethostbyname_r(name, he, buffer, buflen, &h_errnop); + ], tcl_cv_api_gethostbyname_r_5=yes, tcl_cv_api_gethostbyname_r_5=no)]) + tcl_ok=$tcl_cv_api_gethostbyname_r_5 + if test "$tcl_ok" = yes; then + AC_DEFINE(HAVE_GETHOSTBYNAME_R_5, 1, + [Define to 1 if gethostbyname_r takes 5 args.]) + else + 
AC_CACHE_CHECK([for gethostbyname_r with 3 args], tcl_cv_api_gethostbyname_r_3, [ + AC_TRY_COMPILE([ + #include + ], [ + char *name; + struct hostent *he; + struct hostent_data data; + + (void) gethostbyname_r(name, he, &data); + ], tcl_cv_api_gethostbyname_r_3=yes, tcl_cv_api_gethostbyname_r_3=no)]) + tcl_ok=$tcl_cv_api_gethostbyname_r_3 + if test "$tcl_ok" = yes; then + AC_DEFINE(HAVE_GETHOSTBYNAME_R_3, 1, + [Define to 1 if gethostbyname_r takes 3 args.]) + fi + fi + fi + if test "$tcl_ok" = yes; then + AC_DEFINE(HAVE_GETHOSTBYNAME_R, 1, + [Define to 1 if gethostbyname_r is available.]) + fi +])]) + +#-------------------------------------------------------------------- +# SC_TCL_GETADDRINFO +# +# Check if we have 'getaddrinfo' +# +# Arguments: +# None +# +# Results: +# Might define the following vars: +# HAVE_GETADDRINFO +# +#-------------------------------------------------------------------- + +AC_DEFUN([SC_TCL_GETADDRINFO], [AC_CHECK_FUNC(getaddrinfo, [ + AC_CACHE_CHECK([for working getaddrinfo], tcl_cv_api_getaddrinfo, [ + AC_TRY_COMPILE([ + #include + ], [ + const char *name, *port; + struct addrinfo *aiPtr, hints; + (void)getaddrinfo(name,port, &hints, &aiPtr); + (void)freeaddrinfo(aiPtr); + ], tcl_cv_api_getaddrinfo=yes, tcl_cv_getaddrinfo=no)]) + tcl_ok=$tcl_cv_api_getaddrinfo + if test "$tcl_ok" = yes; then + AC_DEFINE(HAVE_GETADDRINFO, 1, + [Define to 1 if getaddrinfo is available.]) + fi +])]) + +#-------------------------------------------------------------------- +# SC_TCL_GETPWUID_R +# +# Check if we have MT-safe variant of getpwuid() and if yes, +# which one exactly. 
+# +# Arguments: +# None +# +# Results: +# +# Might define the following vars: +# HAVE_GETPWUID_R +# HAVE_GETPWUID_R_4 +# HAVE_GETPWUID_R_5 +# +#-------------------------------------------------------------------- + +AC_DEFUN([SC_TCL_GETPWUID_R], [AC_CHECK_FUNC(getpwuid_r, [ + AC_CACHE_CHECK([for getpwuid_r with 5 args], tcl_cv_api_getpwuid_r_5, [ + AC_TRY_COMPILE([ + #include + #include + ], [ + uid_t uid; + struct passwd pw, *pwp; + char buf[512]; + int buflen = 512; + + (void) getpwuid_r(uid, &pw, buf, buflen, &pwp); + ], tcl_cv_api_getpwuid_r_5=yes, tcl_cv_api_getpwuid_r_5=no)]) + tcl_ok=$tcl_cv_api_getpwuid_r_5 + if test "$tcl_ok" = yes; then + AC_DEFINE(HAVE_GETPWUID_R_5, 1, + [Define to 1 if getpwuid_r takes 5 args.]) + else + AC_CACHE_CHECK([for getpwuid_r with 4 args], tcl_cv_api_getpwuid_r_4, [ + AC_TRY_COMPILE([ + #include + #include + ], [ + uid_t uid; + struct passwd pw; + char buf[512]; + int buflen = 512; + + (void)getpwnam_r(uid, &pw, buf, buflen); + ], tcl_cv_api_getpwuid_r_4=yes, tcl_cv_api_getpwuid_r_4=no)]) + tcl_ok=$tcl_cv_api_getpwuid_r_4 + if test "$tcl_ok" = yes; then + AC_DEFINE(HAVE_GETPWUID_R_4, 1, + [Define to 1 if getpwuid_r takes 4 args.]) + fi + fi + if test "$tcl_ok" = yes; then + AC_DEFINE(HAVE_GETPWUID_R, 1, + [Define to 1 if getpwuid_r is available.]) + fi +])]) + +#-------------------------------------------------------------------- +# SC_TCL_GETPWNAM_R +# +# Check if we have MT-safe variant of getpwnam() and if yes, +# which one exactly. 
+# +# Arguments: +# None +# +# Results: +# +# Might define the following vars: +# HAVE_GETPWNAM_R +# HAVE_GETPWNAM_R_4 +# HAVE_GETPWNAM_R_5 +# +#-------------------------------------------------------------------- + +AC_DEFUN([SC_TCL_GETPWNAM_R], [AC_CHECK_FUNC(getpwnam_r, [ + AC_CACHE_CHECK([for getpwnam_r with 5 args], tcl_cv_api_getpwnam_r_5, [ + AC_TRY_COMPILE([ + #include + #include + ], [ + char *name; + struct passwd pw, *pwp; + char buf[512]; + int buflen = 512; + + (void) getpwnam_r(name, &pw, buf, buflen, &pwp); + ], tcl_cv_api_getpwnam_r_5=yes, tcl_cv_api_getpwnam_r_5=no)]) + tcl_ok=$tcl_cv_api_getpwnam_r_5 + if test "$tcl_ok" = yes; then + AC_DEFINE(HAVE_GETPWNAM_R_5, 1, + [Define to 1 if getpwnam_r takes 5 args.]) + else + AC_CACHE_CHECK([for getpwnam_r with 4 args], tcl_cv_api_getpwnam_r_4, [ + AC_TRY_COMPILE([ + #include + #include + ], [ + char *name; + struct passwd pw; + char buf[512]; + int buflen = 512; + + (void)getpwnam_r(name, &pw, buf, buflen); + ], tcl_cv_api_getpwnam_r_4=yes, tcl_cv_api_getpwnam_r_4=no)]) + tcl_ok=$tcl_cv_api_getpwnam_r_4 + if test "$tcl_ok" = yes; then + AC_DEFINE(HAVE_GETPWNAM_R_4, 1, + [Define to 1 if getpwnam_r takes 4 args.]) + fi + fi + if test "$tcl_ok" = yes; then + AC_DEFINE(HAVE_GETPWNAM_R, 1, + [Define to 1 if getpwnam_r is available.]) + fi +])]) + +#-------------------------------------------------------------------- +# SC_TCL_GETGRGID_R +# +# Check if we have MT-safe variant of getgrgid() and if yes, +# which one exactly. 
+# +# Arguments: +# None +# +# Results: +# +# Might define the following vars: +# HAVE_GETGRGID_R +# HAVE_GETGRGID_R_4 +# HAVE_GETGRGID_R_5 +# +#-------------------------------------------------------------------- + +AC_DEFUN([SC_TCL_GETGRGID_R], [AC_CHECK_FUNC(getgrgid_r, [ + AC_CACHE_CHECK([for getgrgid_r with 5 args], tcl_cv_api_getgrgid_r_5, [ + AC_TRY_COMPILE([ + #include + #include + ], [ + gid_t gid; + struct group gr, *grp; + char buf[512]; + int buflen = 512; + + (void) getgrgid_r(gid, &gr, buf, buflen, &grp); + ], tcl_cv_api_getgrgid_r_5=yes, tcl_cv_api_getgrgid_r_5=no)]) + tcl_ok=$tcl_cv_api_getgrgid_r_5 + if test "$tcl_ok" = yes; then + AC_DEFINE(HAVE_GETGRGID_R_5, 1, + [Define to 1 if getgrgid_r takes 5 args.]) + else + AC_CACHE_CHECK([for getgrgid_r with 4 args], tcl_cv_api_getgrgid_r_4, [ + AC_TRY_COMPILE([ + #include + #include + ], [ + gid_t gid; + struct group gr; + char buf[512]; + int buflen = 512; + + (void)getgrgid_r(gid, &gr, buf, buflen); + ], tcl_cv_api_getgrgid_r_4=yes, tcl_cv_api_getgrgid_r_4=no)]) + tcl_ok=$tcl_cv_api_getgrgid_r_4 + if test "$tcl_ok" = yes; then + AC_DEFINE(HAVE_GETGRGID_R_4, 1, + [Define to 1 if getgrgid_r takes 4 args.]) + fi + fi + if test "$tcl_ok" = yes; then + AC_DEFINE(HAVE_GETGRGID_R, 1, + [Define to 1 if getgrgid_r is available.]) + fi +])]) + +#-------------------------------------------------------------------- +# SC_TCL_GETGRNAM_R +# +# Check if we have MT-safe variant of getgrnam() and if yes, +# which one exactly. 
+# +# Arguments: +# None +# +# Results: +# +# Might define the following vars: +# HAVE_GETGRNAM_R +# HAVE_GETGRNAM_R_4 +# HAVE_GETGRNAM_R_5 +# +#-------------------------------------------------------------------- + +AC_DEFUN([SC_TCL_GETGRNAM_R], [AC_CHECK_FUNC(getgrnam_r, [ + AC_CACHE_CHECK([for getgrnam_r with 5 args], tcl_cv_api_getgrnam_r_5, [ + AC_TRY_COMPILE([ + #include + #include + ], [ + char *name; + struct group gr, *grp; + char buf[512]; + int buflen = 512; + + (void) getgrnam_r(name, &gr, buf, buflen, &grp); + ], tcl_cv_api_getgrnam_r_5=yes, tcl_cv_api_getgrnam_r_5=no)]) + tcl_ok=$tcl_cv_api_getgrnam_r_5 + if test "$tcl_ok" = yes; then + AC_DEFINE(HAVE_GETGRNAM_R_5, 1, + [Define to 1 if getgrnam_r takes 5 args.]) + else + AC_CACHE_CHECK([for getgrnam_r with 4 args], tcl_cv_api_getgrnam_r_4, [ + AC_TRY_COMPILE([ + #include + #include + ], [ + char *name; + struct group gr; + char buf[512]; + int buflen = 512; + + (void)getgrnam_r(name, &gr, buf, buflen); + ], tcl_cv_api_getgrnam_r_4=yes, tcl_cv_api_getgrnam_r_4=no)]) + tcl_ok=$tcl_cv_api_getgrnam_r_4 + if test "$tcl_ok" = yes; then + AC_DEFINE(HAVE_GETGRNAM_R_4, 1, + [Define to 1 if getgrnam_r takes 4 args.]) + fi + fi + if test "$tcl_ok" = yes; then + AC_DEFINE(HAVE_GETGRNAM_R, 1, + [Define to 1 if getgrnam_r is available.]) + fi +])]) + +# Local Variables: +# mode: autoconf +# End: diff --git a/build/tcmalloc.m4 b/build/tcmalloc.m4 new file mode 100644 index 00000000..e369d739 --- /dev/null +++ b/build/tcmalloc.m4 @@ -0,0 +1,54 @@ +dnl -------------------------------------------------------- -*- autoconf -*- +dnl Licensed to the Apache Software Foundation (ASF) under one or more +dnl contributor license agreements. See the NOTICE file distributed with +dnl this work for additional information regarding copyright ownership. +dnl The ASF licenses this file to You under the Apache License, Version 2.0 +dnl (the "License"); you may not use this file except in compliance with +dnl the License. 
You may obtain a copy of the License at +dnl +dnl http://www.apache.org/licenses/LICENSE-2.0 +dnl +dnl Unless required by applicable law or agreed to in writing, software +dnl distributed under the License is distributed on an "AS IS" BASIS, +dnl WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +dnl See the License for the specific language governing permissions and +dnl limitations under the License. + +dnl +dnl tcmalloc.m4: Trafficserver's tcmalloc autoconf macros +dnl + +dnl This is kinda fugly, but need a way to both specify a directory and which +dnl of the many tcmalloc libraries to use ... +AC_DEFUN([TS_CHECK_TCMALLOC], [ +AC_ARG_WITH([tcmalloc-lib], + [AS_HELP_STRING([--with-tcmalloc-lib],[specify the tcmalloc library to use [default=tcmalloc]])], + [ + with_tcmalloc_lib="$withval" + ],[ + with_tcmalloc_lib="tcmalloc" + ] +) + +has_tcmalloc=0 +AC_ARG_WITH([tcmalloc], [AC_HELP_STRING([--with-tcmalloc=DIR], [use the tcmalloc library])], +[ + if test "$withval" != "no"; then + if test "x${enable_jemalloc}" = "xyes"; then + AC_MSG_ERROR([Cannot compile with both tcmalloc and jemalloc]) + fi + tcmalloc_have_libs=0 + if test "x$withval" != "xyes" && test "x$withval" != "x"; then + tcmalloc_ldflags="$withval/lib" + TS_ADDTO(LDFLAGS, [-L${tcmalloc_ldflags}]) + TS_ADDTO(LIBTOOL_LINK_FLAGS, [-rpath ${tcmalloc_ldflags}]) + fi + AC_SEARCH_LIBS([tc_cfree], ${with_tcmalloc_lib}, [tcmalloc_have_lib=1]) + if test "$tcmalloc_have_lib" != "0"; then + TS_ADDTO(LIBS, [-l${with_tcmalloc_lib}]) + has_tcmalloc=1 + fi + fi +]) +AC_SUBST(has_tcmalloc) +]) diff --git a/build/xml.m4 b/build/xml.m4 new file mode 100644 index 00000000..b639054c --- /dev/null +++ b/build/xml.m4 @@ -0,0 +1,200 @@ +dnl -------------------------------------------------------- -*- autoconf -*- +dnl Licensed to the Apache Software Foundation (ASF) under one or more +dnl contributor license agreements. 
See the NOTICE file distributed with +dnl this work for additional information regarding copyright ownership. +dnl The ASF licenses this file to You under the Apache License, Version 2.0 +dnl (the "License"); you may not use this file except in compliance with +dnl the License. You may obtain a copy of the License at +dnl +dnl http://www.apache.org/licenses/LICENSE-2.0 +dnl +dnl Unless required by applicable law or agreed to in writing, software +dnl distributed under the License is distributed on an "AS IS" BASIS, +dnl WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +dnl See the License for the specific language governing permissions and +dnl limitations under the License. + +dnl +dnl xml.m4 Trafficserver's Xml autoconf macros +dnl + +dnl +dnl TS_CHECK_XML: look for xml libraries and headers +dnl +AC_DEFUN([TS_CHECK_XML], [ + enable_xml=no + AC_MSG_CHECKING(["For XML parser"]) + AC_ARG_WITH(xml, [AC_HELP_STRING([--with-xml=(expat|libxml2)],[select XML parser])], + [ + if test "$withval" = "expat" ; then + TS_CHECK_XML_EXPAT + elif test "$withval" = "libxml2" ; then + TS_CHECK_XML_LIBXML2 + else + AC_MSG_ERROR([Unrecognised --with-xml option]) + fi + ], + [ + TS_CHECK_XML_LIBXML2 + if test "$enable_xml" = "no"; then + TS_CHECK_XML_EXPAT + fi + ]) + if test "$enable_xml" = "no"; then + AC_MSG_ERROR([An XML parser (expat or libxml2) is required.]) + fi +]) +dnl + +AC_DEFUN([TS_CHECK_XML_LIBXML2], [ + enable_libxml2=no + libxml2_include="" + libxml2_ldflags="" + AC_ARG_WITH(libxml2, [AC_HELP_STRING([--with-libxml2=DIR],[use a specific libxml2 library])], + [ + if test "x$withval" != "xyes" && test "x$withval" != "x"; then + if test "$withval" = "yes"; then + enable_libxml2=yes + libxml2_include="/usr/include/libxml2" + elif test "$withval" != "no"; then + enable_libxml2=yes + libxml2_include="$withval/include/libxml2" + libxml2_ldflags="-L$withval/lib" + fi + fi + ]) + if test ${enable_libxml2} = "no"; then + enable_libxml2=yes + 
libxml2_include="/usr/include/libxml2" + fi + if test ${enable_libxml2} != "no"; then + AC_CACHE_CHECK([libxml2], [ts_cv_libxml2], [ + ts_libxml2_CPPFLAGS=$CPPFLAGS + ts_libxml2_LIBS="$LIBS" + ts_libxml2_LDFLAGS="$LDFLAGS" + CPPFLAGS="$CPPFLAGS -I$libxml2_include" + LDFLAGS="$LDFLAGS $libxml2_ldflags" + LIBS="$LIBS -lxml2" + AC_TRY_LINK( + [#include ], + [xmlSAXHandler sax; xmlCreatePushParserCtxt(&sax, NULL, NULL, 0, NULL);], + [ts_cv_libxml2=yes], + [ts_cv_libxml2=no], + ) + CPPFLAGS=$ts_libxml2_CPPFLAGS + LIBS=$ts_libxml2_LIBS + LDFLAGS=$ts_libxml2_LDFLAGS + ]) + if test $ts_cv_libxml2 = yes ; then + AC_DEFINE([HAVE_LIBXML2], 1, [Using libxml2]) + if test -d "$libxml2_include" ; then + TS_ADDTO(CPPFLAGS, [-I${libxml2_include}]) + fi + if test -d "$libxml2_ldflags" ; then + TS_ADDTO(LDFLAGS, [-L${libxml2_ldflags}]) + TS_ADDTO(LIBTOOL_LINK_FLAGS, [-R${libxml2_ldflags}]) + fi + TS_ADDTO(LIBS, -lxml2) + enable_xml=yes + else + AC_MSG_WARN(["Failed to find libxml2"]) + fi + fi +]) + +AC_DEFUN([TS_CHECK_XML_EXPAT], [ +enable_expat=no +AC_ARG_WITH(expat, [AC_HELP_STRING([--with-expat=DIR],[use a specific Expat library])], +[ + if test "x$withval" != "xyes" && test "x$withval" != "x"; then + expat_base_dir="$withval" + if test "$withval" != "no"; then + enable_expat=yes + case "$withval" in + *":"*) + expat_include="`echo $withval |sed -e 's/:.*$//'`" + expat_ldflags="`echo $withval |sed -e 's/^.*://'`" + AC_MSG_CHECKING(checking for Expat includes in $expat_include libs in $expat_ldflags ) + ;; + *) + expat_include="$withval/include" + expat_ldflags="$withval/lib" + AC_MSG_CHECKING(checking for Expat includes in $withval) + ;; + esac + fi + fi +]) + +if test "x$expat_base_dir" = "x"; then + AC_MSG_CHECKING([for Expat location]) + AC_CACHE_VAL(ats_cv_expat_dir,[ + _expat_dir_list="" + case $host_os_def in + darwin) + for dir in "`xcrun -show-sdk-path 2>/dev/null`/usr" /usr/local /usr; do + if test -d $dir && test -f $dir/include/expat.h; then + ats_cv_expat_dir=$dir + 
break + fi + done + ;; + *) + for dir in /usr/local /usr; do + if test -d $dir && test -f $dir/include/expat.h; then + ats_cv_expat_dir=$dir + break + fi + done + ;; + esac + + unset _expat_dir_list + ]) + + expat_base_dir=$ats_cv_expat_dir + if test "x$expat_base_dir" = "x"; then + enable_expat=no + AC_MSG_RESULT([not found]) + else + enable_expat=yes + expat_include="$expat_base_dir/include" + expat_ldflags="$expat_base_dir/lib" + AC_MSG_RESULT([${expat_base_dir}]) + fi +else + if test -d $expat_include && test -d $expat_ldflags && test -f $expat_include/expat.h; then + AC_MSG_RESULT([ok]) + else + AC_MSG_RESULT([not found]) + fi +fi + +expath=0 +if test "$enable_expat" != "no"; then + saved_ldflags=$LDFLAGS + saved_cppflags=$CPPFLAGS + expat_have_headers=0 + expat_have_libs=0 + if test "$expat_base_dir" != "/usr"; then + TS_ADDTO(CPPFLAGS, [-I${expat_include}]) + TS_ADDTO(LDFLAGS, [-L${expat_ldflags}]) + TS_ADDTO(LIBTOOL_LINK_FLAGS, [-R${expat_ldflags}]) + fi + AC_SEARCH_LIBS([XML_SetUserData], [expat], [expat_have_libs=1]) + if test "$expat_have_libs" != "0"; then + TS_FLAG_HEADERS(expat.h, [expat_have_headers=1]) + fi + if test "$expat_have_headers" != "0"; then + enable_xml=yes + + AC_SUBST([LIBEXPAT],["-lexpat"]) + AC_DEFINE([HAVE_LIBEXPAT],[1],[Define to 1 if you have Expat library]) + else + enable_expat=no + CPPFLAGS=$saved_cppflags + LDFLAGS=$saved_ldflags + fi +fi +AC_SUBST(expath) +]) diff --git a/build/zlib.m4 b/build/zlib.m4 new file mode 100644 index 00000000..b71a74af --- /dev/null +++ b/build/zlib.m4 @@ -0,0 +1,100 @@ +dnl -------------------------------------------------------- -*- autoconf -*- +dnl Licensed to the Apache Software Foundation (ASF) under one or more +dnl contributor license agreements. See the NOTICE file distributed with +dnl this work for additional information regarding copyright ownership. 
+dnl The ASF licenses this file to You under the Apache License, Version 2.0 +dnl (the "License"); you may not use this file except in compliance with +dnl the License. You may obtain a copy of the License at +dnl +dnl http://www.apache.org/licenses/LICENSE-2.0 +dnl +dnl Unless required by applicable law or agreed to in writing, software +dnl distributed under the License is distributed on an "AS IS" BASIS, +dnl WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +dnl See the License for the specific language governing permissions and +dnl limitations under the License. + +dnl +dnl zlib.m4: Trafficserver's zlib autoconf macros +dnl + +dnl +dnl TS_CHECK_ZLIB: look for zlib libraries and headers +dnl +AC_DEFUN([TS_CHECK_ZLIB], [ +enable_zlib=no +AC_ARG_WITH(zlib, [AC_HELP_STRING([--with-zlib=DIR],[use a specific zlib library])], +[ + if test "x$withval" != "xyes" && test "x$withval" != "x"; then + zlib_base_dir="$withval" + if test "$withval" != "no"; then + enable_zlib=yes + case "$withval" in + *":"*) + zlib_include="`echo $withval |sed -e 's/:.*$//'`" + zlib_ldflags="`echo $withval |sed -e 's/^.*://'`" + AC_MSG_CHECKING(checking for zlib includes in $zlib_include libs in $zlib_ldflags ) + ;; + *) + zlib_include="$withval/include" + zlib_ldflags="$withval/lib" + AC_MSG_CHECKING(checking for zlib includes in $withval) + ;; + esac + fi + fi +]) + +if test "x$zlib_base_dir" = "x"; then + AC_MSG_CHECKING([for zlib location]) + AC_CACHE_VAL(ats_cv_zlib_dir,[ + for dir in /usr/local /usr ; do + if test -d $dir && test -f $dir/include/zlib.h; then + ats_cv_zlib_dir=$dir + break + fi + done + ]) + zlib_base_dir=$ats_cv_zlib_dir + if test "x$zlib_base_dir" = "x"; then + enable_zlib=no + AC_MSG_RESULT([not found]) + else + enable_zlib=yes + zlib_include="$zlib_base_dir/include" + zlib_ldflags="$zlib_base_dir/lib" + AC_MSG_RESULT([$zlib_base_dir]) + fi +else + if test -d $zlib_include && test -d $zlib_ldflags && test -f $zlib_include/zlib.h; then + 
AC_MSG_RESULT([ok]) + else + AC_MSG_RESULT([not found]) + fi +fi + +zlibh=0 +if test "$enable_zlib" != "no"; then + saved_ldflags=$LDFLAGS + saved_cppflags=$CPPFLAGS + zlib_have_headers=0 + zlib_have_libs=0 + if test "$zlib_base_dir" != "/usr"; then + TS_ADDTO(CPPFLAGS, [-I${zlib_include}]) + TS_ADDTO(LDFLAGS, [-L${zlib_ldflags}]) + TS_ADDTO(LIBTOOL_LINK_FLAGS, [-rpath ${zlib_ldflags}]) + fi + AC_SEARCH_LIBS([compressBound], [z], [zlib_have_libs=1]) + if test "$zlib_have_libs" != "0"; then + TS_FLAG_HEADERS(zlib.h, [zlib_have_headers=1]) + fi + if test "$zlib_have_headers" != "0"; then + AC_SUBST(LIBZ, [-lz]) + else + enable_zlib=no + CPPFLAGS=$saved_cppflags + LDFLAGS=$saved_ldflags + fi +fi +AC_SUBST(zlibh) +]) diff --git a/cmd/Makefile.am b/cmd/Makefile.am new file mode 100644 index 00000000..58a8a68a --- /dev/null +++ b/cmd/Makefile.am @@ -0,0 +1,18 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +SUBDIRS = traffic_top traffic_cop traffic_line diff --git a/cmd/Makefile.in b/cmd/Makefile.in new file mode 100644 index 00000000..df3a7ce1 --- /dev/null +++ b/cmd/Makefile.in @@ -0,0 +1,866 @@ +# Makefile.in generated by automake 1.13.4 from Makefile.am. 
+# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) 
;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgincludedir = $(includedir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = cmd +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/ax_boost_base.m4 \ + $(top_srcdir)/build/ax_check_openssl.m4 \ + $(top_srcdir)/build/ax_compare_version.m4 \ + $(top_srcdir)/build/ax_compiler_vendor.m4 \ + 
$(top_srcdir)/build/ax_cxx_compile_stdcxx_11.m4 \ + $(top_srcdir)/build/ax_lib_curl.m4 \ + $(top_srcdir)/build/ax_lib_readline.m4 \ + $(top_srcdir)/build/ax_path_generic.m4 \ + $(top_srcdir)/build/ax_perl_modules.m4 \ + $(top_srcdir)/build/ax_with_curses.m4 \ + $(top_srcdir)/build/common.m4 $(top_srcdir)/build/crypto.m4 \ + $(top_srcdir)/build/jemalloc.m4 $(top_srcdir)/build/libtool.m4 \ + $(top_srcdir)/build/ltoptions.m4 \ + $(top_srcdir)/build/ltsugar.m4 \ + $(top_srcdir)/build/ltversion.m4 \ + $(top_srcdir)/build/lt~obsolete.m4 $(top_srcdir)/build/lzma.m4 \ + $(top_srcdir)/build/network.m4 $(top_srcdir)/build/pcre.m4 \ + $(top_srcdir)/build/pkg.m4 $(top_srcdir)/build/tcl.m4 \ + $(top_srcdir)/build/tcmalloc.m4 $(top_srcdir)/build/xml.m4 \ + $(top_srcdir)/build/zlib.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/lib/ts/ink_autoconf.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ + ctags-recursive dvi-recursive html-recursive info-recursive \ + install-data-recursive install-dvi-recursive \ + install-exec-recursive install-html-recursive \ + install-info-recursive install-pdf-recursive \ + install-ps-recursive install-recursive installcheck-recursive \ + installdirs-recursive pdf-recursive ps-recursive \ + tags-recursive uninstall-recursive +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +RECURSIVE_CLEAN_TARGETS = 
mostlyclean-recursive clean-recursive \ + distclean-recursive maintainer-clean-recursive +am__recursive_targets = \ + $(RECURSIVE_TARGETS) \ + $(RECURSIVE_CLEAN_TARGETS) \ + $(am__extra_recursive_targets) +AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ + distdir +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +# Read a list of newline-separated strings from the standard input, +# and print each of them once, without duplicates. Input order is +# *not* preserved. +am__uniquify_input = $(AWK) '\ + BEGIN { nonempty = 0; } \ + { items[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in items) print i; }; } \ +' +# Make sure the list of sources is unique. This is necessary because, +# e.g., the same source file might be shared among _SOURCES variables +# for different programs/libraries. +am__define_uniq_tagged_files = \ + list='$(am__tagged_files)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | $(am__uniquify_input)` +ETAGS = etags +CTAGS = ctags +DIST_SUBDIRS = $(SUBDIRS) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +am__relativize = \ + dir0=`pwd`; \ + sed_first='s,^\([^/]*\)/.*$$,\1,'; \ + sed_rest='s,^[^/]*/*,,'; \ + sed_last='s,^.*/\([^/]*\)$$,\1,'; \ + sed_butlast='s,/*[^/]*$$,,'; \ + while test -n "$$dir1"; do \ + first=`echo "$$dir1" | sed -e "$$sed_first"`; \ + if test "$$first" != "."; then \ + if test "$$first" = ".."; then \ + dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ + dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ + else \ + first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ + if test "$$first2" = "$$first"; then \ + dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ + else \ + dir2="../$$dir2"; \ + fi; \ + dir0="$$dir0"/"$$first"; \ + fi; \ + fi; \ + dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ + done; \ + reldir="$$dir2" +pkgdatadir = @pkgdatadir@ +pkglibdir = @pkglibdir@ +pkglibexecdir = @pkglibexecdir@ 
+ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_CFLAGS = @AM_CFLAGS@ +AM_CXXFLAGS = @AM_CXXFLAGS@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +API_DEFS = @API_DEFS@ +AR = @AR@ +ASCPP = @ASCPP@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +BOOST_CPPFLAGS = @BOOST_CPPFLAGS@ +BOOST_LDFLAGS = @BOOST_LDFLAGS@ +CC = @CC@ +CCACHE = @CCACHE@ +CCAS = @CCAS@ +CCASDEPMODE = @CCASDEPMODE@ +CCASFLAGS = @CCASFLAGS@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CURL_CFLAGS = @CURL_CFLAGS@ +CURL_CONFIG = @CURL_CONFIG@ +CURL_LIBS = @CURL_LIBS@ +CURSES_LIB = @CURSES_LIB@ +CXX = @CXX@ +CXXCPP = @CXXCPP@ +CXXDEPMODE = @CXXDEPMODE@ +CXXFLAGS = @CXXFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +DLLTOOL = @DLLTOOL@ +DOXYGEN = @DOXYGEN@ +DSYMUTIL = @DSYMUTIL@ +DUMPBIN = @DUMPBIN@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +EXPAT_LDFLAGS = @EXPAT_LDFLAGS@ +EXTRA_CC_LDFLAGS = @EXTRA_CC_LDFLAGS@ +EXTRA_CXX_LDFLAGS = @EXTRA_CXX_LDFLAGS@ +FGREP = @FGREP@ +FLEX_CFLAGS = @FLEX_CFLAGS@ +GEOIP_LIBS = @GEOIP_LIBS@ +GREP = @GREP@ +HOST_GUESS = @HOST_GUESS@ +HWLOC_CFLAGS = @HWLOC_CFLAGS@ +HWLOC_LIBS = @HWLOC_LIBS@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +LD = @LD@ +LDFLAGS = @LDFLAGS@ +LEX = @LEX@ +LEXLIB = @LEXLIB@ +LEX_OUTPUT_ROOT = @LEX_OUTPUT_ROOT@ +LIBCAP = @LIBCAP@ +LIBCRYPT = @LIBCRYPT@ +LIBDEMANGLE = @LIBDEMANGLE@ +LIBEXPAT = @LIBEXPAT@ +LIBLZMA = @LIBLZMA@ +LIBOBJS = @LIBOBJS@ +LIBPCRE = @LIBPCRE@ +LIBPROFILER = @LIBPROFILER@ +LIBREADLINE = @LIBREADLINE@ +LIBRESOLV = @LIBRESOLV@ +LIBS = @LIBS@ +LIBTCL = @LIBTCL@ +LIBTOOL = @LIBTOOL@ +LIBTOOL_LINK_FLAGS = @LIBTOOL_LINK_FLAGS@ +LIBZ = @LIBZ@ +LIPO = @LIPO@ +LN_S = @LN_S@ +LTLIBOBJS = @LTLIBOBJS@ +LUA_LDFLAGS = @LUA_LDFLAGS@ +MAKEINFO = @MAKEINFO@ +MANIFEST_TOOL = 
@MANIFEST_TOOL@ +MGMT_DEFS = @MGMT_DEFS@ +MKDIR_P = @MKDIR_P@ +NM = @NM@ +NMEDIT = @NMEDIT@ +OBJDUMP = @OBJDUMP@ +OBJEXT = @OBJEXT@ +OPENSSL_INCLUDES = @OPENSSL_INCLUDES@ +OPENSSL_LDFLAGS = @OPENSSL_LDFLAGS@ +OPENSSL_LIBS = @OPENSSL_LIBS@ +OTOOL = @OTOOL@ +OTOOL64 = @OTOOL64@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PCRE_CONFIG = @PCRE_CONFIG@ +PERL = @PERL@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PYTHON = @PYTHON@ +PYTHON_EXEC_PREFIX = @PYTHON_EXEC_PREFIX@ +PYTHON_PLATFORM = @PYTHON_PLATFORM@ +PYTHON_PREFIX = @PYTHON_PREFIX@ +PYTHON_VERSION = @PYTHON_VERSION@ +RANLIB = @RANLIB@ +RM = @RM@ +SED = @SED@ +SET_MAKE = @SET_MAKE@ +SHARED_CFLAGS = @SHARED_CFLAGS@ +SHARED_CXXFLAGS = @SHARED_CXXFLAGS@ +SHARED_CXXLINKFLAGS = @SHARED_CXXLINKFLAGS@ +SHARED_LDFLAGS = @SHARED_LDFLAGS@ +SHELL = @SHELL@ +SPDYLAY_CFLAGS = @SPDYLAY_CFLAGS@ +SPDYLAY_LIBS = @SPDYLAY_LIBS@ +SPHINXBUILD = @SPHINXBUILD@ +SPHINXOPTS = @SPHINXOPTS@ +STRIP = @STRIP@ +TCL_BIN_DIR = @TCL_BIN_DIR@ +TCL_LIB_FILE = @TCL_LIB_FILE@ +TCL_LIB_FLAG = @TCL_LIB_FLAG@ +TCL_LIB_SPEC = @TCL_LIB_SPEC@ +TCL_PATCH_LEVEL = @TCL_PATCH_LEVEL@ +TCL_SRC_DIR = @TCL_SRC_DIR@ +TCL_STUB_LIB_FILE = @TCL_STUB_LIB_FILE@ +TCL_STUB_LIB_FLAG = @TCL_STUB_LIB_FLAG@ +TCL_STUB_LIB_SPEC = @TCL_STUB_LIB_SPEC@ +TCL_VERSION = @TCL_VERSION@ +TFW_PACKAGE_SUFFIX = @TFW_PACKAGE_SUFFIX@ +TS_LIBTOOL_VERSION = @TS_LIBTOOL_VERSION@ +TS_LUAJIT_LDFLAGS = @TS_LUAJIT_LDFLAGS@ +TS_MAN1_MANPAGES = @TS_MAN1_MANPAGES@ +TS_MAN3_MANPAGES = @TS_MAN3_MANPAGES@ +TS_MAN5_MANPAGES = @TS_MAN5_MANPAGES@ +TS_MAN8_MANPAGES = @TS_MAN8_MANPAGES@ +TS_VERSION_MAJOR = @TS_VERSION_MAJOR@ +TS_VERSION_MICRO = @TS_VERSION_MICRO@ +TS_VERSION_MINOR = @TS_VERSION_MINOR@ +TS_VERSION_NUMBER = 
@TS_VERSION_NUMBER@ +TS_VERSION_STRING = @TS_VERSION_STRING@ +VERSION = @VERSION@ +YACC = @YACC@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_AR = @ac_ct_AR@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_CXX = @ac_ct_CXX@ +ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ +ac_cv_sizeof_voidp = @ac_cv_sizeof_voidp@ +allocah = @allocah@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +arpa_ineth = @arpa_ineth@ +arpa_nameser_compath = @arpa_nameser_compath@ +arpa_nameserh = @arpa_nameserh@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_group = @build_group@ +build_machine = @build_machine@ +build_os = @build_os@ +build_person = @build_person@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +cpioh = @cpioh@ +ctypeh = @ctypeh@ +datadir = @datadir@ +datarootdir = @datarootdir@ +default_loopback_iface = @default_loopback_iface@ +defer_accept = @defer_accept@ +docdir = @docdir@ +dvidir = @dvidir@ +enable_remote_cov_commit = @enable_remote_cov_commit@ +endianh = @endianh@ +exec_prefix = @exec_prefix@ +execinfoh = @execinfoh@ +exp_bindir = @exp_bindir@ +exp_cachedir = @exp_cachedir@ +exp_datadir = @exp_datadir@ +exp_docdir = @exp_docdir@ +exp_exec_prefix = @exp_exec_prefix@ +exp_includedir = @exp_includedir@ +exp_infodir = @exp_infodir@ +exp_installbuilddir = @exp_installbuilddir@ +exp_libdir = @exp_libdir@ +exp_libexecdir = @exp_libexecdir@ +exp_localstatedir = @exp_localstatedir@ +exp_logdir = @exp_logdir@ +exp_mandir = @exp_mandir@ +exp_prefix = @exp_prefix@ +exp_runtimedir = @exp_runtimedir@ +exp_sbindir = @exp_sbindir@ +exp_sysconfdir = @exp_sysconfdir@ +expath = @expath@ +floath = @floath@ +gethostbyname_r_glibc2 = @gethostbyname_r_glibc2@ +has_128bit_cas = @has_128bit_cas@ +has_backtrace = @has_backtrace@ +has_in6_is_addr_unspecified = 
@has_in6_is_addr_unspecified@ +has_interim_cache = @has_interim_cache@ +has_ip_tos = @has_ip_tos@ +has_profiler = @has_profiler@ +has_so_mark = @has_so_mark@ +has_spdy = @has_spdy@ +has_tcmalloc = @has_tcmalloc@ +has_tests = @has_tests@ +has_wccp = @has_wccp@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +ifaddrsh = @ifaddrsh@ +includedir = @includedir@ +infodir = @infodir@ +ink_with_modules_local = @ink_with_modules_local@ +ink_with_modules_process = @ink_with_modules_process@ +install_sh = @install_sh@ +installbuilddir = @installbuilddir@ +iocore_include_dirs = @iocore_include_dirs@ +ip_transparent = @ip_transparent@ +jemalloch = @jemalloch@ +libdir = @libdir@ +libexecdir = @libexecdir@ +libgenh = @libgenh@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +lzmah = @lzmah@ +machine_endianh = @machine_endianh@ +malloch = @malloch@ +mandir = @mandir@ +mathh = @mathh@ +max_api_stats = @max_api_stats@ +max_event_threads = @max_event_threads@ +max_host_name_len = @max_host_name_len@ +max_threads_per_type = @max_threads_per_type@ +mkdir_p = @mkdir_p@ +net_ppp_defsh = @net_ppp_defsh@ +netdbh = @netdbh@ +netinet_in_systmh = @netinet_in_systmh@ +netinet_inh = @netinet_inh@ +netinet_ip_icmph = @netinet_ip_icmph@ +netinet_iph = @netinet_iph@ +netinet_tcph = @netinet_tcph@ +oldincludedir = @oldincludedir@ +pcre_pcreh = @pcre_pcreh@ +pcreh = @pcreh@ +pdfdir = @pdfdir@ +pkgbindir = @pkgbindir@ +pkgcachedir = @pkgcachedir@ +pkgdocdir = @pkgdocdir@ +pkglocalstatedir = @pkglocalstatedir@ +pkglogdir = @pkglogdir@ +pkgpyexecdir = @pkgpyexecdir@ +pkgpythondir = @pkgpythondir@ +pkgruntimedir = @pkgruntimedir@ +pkgsbindir = @pkgsbindir@ +pkgsysconfdir = @pkgsysconfdir@ +pkgsysgroup = @pkgsysgroup@ +pkgsysuser = @pkgsysuser@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +pthread_nph = @pthread_nph@ +pyexecdir = @pyexecdir@ 
+pythondir = @pythondir@ +readline_readlineh = @readline_readlineh@ +rel_bindir = @rel_bindir@ +rel_cachedir = @rel_cachedir@ +rel_datadir = @rel_datadir@ +rel_docdir = @rel_docdir@ +rel_exec_prefix = @rel_exec_prefix@ +rel_includedir = @rel_includedir@ +rel_infodir = @rel_infodir@ +rel_installbuilddir = @rel_installbuilddir@ +rel_libdir = @rel_libdir@ +rel_libexecdir = @rel_libexecdir@ +rel_localstatedir = @rel_localstatedir@ +rel_logdir = @rel_logdir@ +rel_mandir = @rel_mandir@ +rel_prefix = @rel_prefix@ +rel_runtimedir = @rel_runtimedir@ +rel_sbindir = @rel_sbindir@ +rel_sysconfdir = @rel_sysconfdir@ +runtimedir = @runtimedir@ +sbindir = @sbindir@ +schedh = @schedh@ +sharedstatedir = @sharedstatedir@ +siginfoh = @siginfoh@ +srcdir = @srcdir@ +stroptsh = @stroptsh@ +sys_byteorderh = @sys_byteorderh@ +sys_cpuseth = @sys_cpuseth@ +sys_epollh = @sys_epollh@ +sys_eventh = @sys_eventh@ +sys_ioctlh = @sys_ioctlh@ +sys_mounth = @sys_mounth@ +sys_paramh = @sys_paramh@ +sys_pseth = @sys_pseth@ +sys_sockioh = @sys_sockioh@ +sys_sysctlh = @sys_sysctlh@ +sys_sysinfoh = @sys_sysinfoh@ +sys_sysmacrosh = @sys_sysmacrosh@ +sys_systeminfoh = @sys_systeminfoh@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +use_diags = @use_diags@ +use_epoll = @use_epoll@ +use_fast_sdk = @use_fast_sdk@ +use_freelist = @use_freelist@ +use_hwloc = @use_hwloc@ +use_kqueue = @use_kqueue@ +use_linux_native_aio = @use_linux_native_aio@ +use_port = @use_port@ +use_posix_cap = @use_posix_cap@ +use_reclaimable_freelist = @use_reclaimable_freelist@ +use_tls_alpn = @use_tls_alpn@ +use_tls_eckey = @use_tls_eckey@ +use_tls_npn = @use_tls_npn@ +use_tls_sni = @use_tls_sni@ +use_tproxy = @use_tproxy@ +valuesh = @valuesh@ +waith = @waith@ +zlibh = @zlibh@ +SUBDIRS = traffic_top traffic_cop traffic_line +all: all-recursive + +.SUFFIXES: +$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) + 
@for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign cmd/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --foreign cmd/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs + +# This directory's subdirectories are mostly independent; you can cd +# into them and run 'make' without going through this Makefile. +# To change the values of 'make' variables: instead of editing Makefiles, +# (1) if the variable is set in 'config.status', edit 'config.status' +# (which will cause the Makefiles to be regenerated when you run 'make'); +# (2) otherwise, pass the desired values on the 'make' command line. 
+$(am__recursive_targets): + @fail=; \ + if $(am__make_keepgoing); then \ + failcom='fail=yes'; \ + else \ + failcom='exit 1'; \ + fi; \ + dot_seen=no; \ + target=`echo $@ | sed s/-recursive//`; \ + case "$@" in \ + distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ + *) list='$(SUBDIRS)' ;; \ + esac; \ + for subdir in $$list; do \ + echo "Making $$target in $$subdir"; \ + if test "$$subdir" = "."; then \ + dot_seen=yes; \ + local_target="$$target-am"; \ + else \ + local_target="$$target"; \ + fi; \ + ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ + || eval $$failcom; \ + done; \ + if test "$$dot_seen" = "no"; then \ + $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ + fi; test -z "$$fail" + +ID: $(am__tagged_files) + $(am__define_uniq_tagged_files); mkid -fID $$unique +tags: tags-recursive +TAGS: tags + +tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + set x; \ + here=`pwd`; \ + if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ + include_option=--etags-include; \ + empty_fix=.; \ + else \ + include_option=--include; \ + empty_fix=; \ + fi; \ + list='$(SUBDIRS)'; for subdir in $$list; do \ + if test "$$subdir" = .; then :; else \ + test ! 
-f $$subdir/TAGS || \ + set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ + fi; \ + done; \ + $(am__define_uniq_tagged_files); \ + shift; \ + if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + if test $$# -gt 0; then \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + "$$@" $$unique; \ + else \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$unique; \ + fi; \ + fi +ctags: ctags-recursive + +CTAGS: ctags +ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + $(am__define_uniq_tagged_files); \ + test -z "$(CTAGS_ARGS)$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && $(am__cd) $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) "$$here" +cscopelist: cscopelist-recursive + +cscopelist-am: $(am__tagged_files) + list='$(am__tagged_files)'; \ + case "$(srcdir)" in \ + [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ + *) sdir=$(subdir)/$(srcdir) ;; \ + esac; \ + for i in $$list; do \ + if test -f "$$i"; then \ + echo "$(subdir)/$$i"; \ + else \ + echo "$$sdir/$$i"; \ + fi; \ + done >> $(top_builddir)/cscope.files + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done + @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ + if test "$$subdir" = .; then :; else \ + $(am__make_dryrun) \ + || test -d "$(distdir)/$$subdir" \ + || $(MKDIR_P) "$(distdir)/$$subdir" \ + || exit 1; \ + dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ + $(am__relativize); \ + new_distdir=$$reldir; \ + dir1=$$subdir; dir2="$(top_distdir)"; \ + $(am__relativize); \ + new_top_distdir=$$reldir; \ + echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ + echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ + ($(am__cd) $$subdir && \ + $(MAKE) $(AM_MAKEFLAGS) \ + top_distdir="$$new_top_distdir" \ + distdir="$$new_distdir" \ + am__remove_distdir=: \ + am__skip_length_check=: \ + am__skip_mode_fix=: \ + distdir) \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-recursive +all-am: Makefile +installdirs: installdirs-recursive +installdirs-am: +install: install-recursive +install-exec: install-exec-recursive +install-data: install-data-recursive +uninstall: uninstall-recursive + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-recursive +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + 
"INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." +clean: clean-recursive + +clean-am: clean-generic clean-libtool mostlyclean-am + +distclean: distclean-recursive + -rm -f Makefile +distclean-am: clean-am distclean-generic distclean-tags + +dvi: dvi-recursive + +dvi-am: + +html: html-recursive + +html-am: + +info: info-recursive + +info-am: + +install-data-am: + +install-dvi: install-dvi-recursive + +install-dvi-am: + +install-exec-am: + +install-html: install-html-recursive + +install-html-am: + +install-info: install-info-recursive + +install-info-am: + +install-man: + +install-pdf: install-pdf-recursive + +install-pdf-am: + +install-ps: install-ps-recursive + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-recursive + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-recursive + +mostlyclean-am: mostlyclean-generic mostlyclean-libtool + +pdf: pdf-recursive + +pdf-am: + +ps: ps-recursive + +ps-am: + +uninstall-am: + +.MAKE: $(am__recursive_targets) install-am install-strip + +.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \ + check-am clean clean-generic clean-libtool cscopelist-am ctags \ + ctags-am distclean distclean-generic distclean-libtool \ + distclean-tags distdir dvi dvi-am html html-am info info-am \ + install install-am install-data install-data-am install-dvi \ + install-dvi-am install-exec install-exec-am install-html \ + install-html-am install-info install-info-am install-man \ + install-pdf install-pdf-am install-ps install-ps-am \ + install-strip installcheck 
installcheck-am installdirs \ + installdirs-am maintainer-clean maintainer-clean-generic \ + mostlyclean mostlyclean-generic mostlyclean-libtool pdf pdf-am \ + ps ps-am tags tags-am uninstall uninstall-am + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/cmd/traffic_cop/Makefile.am b/cmd/traffic_cop/Makefile.am new file mode 100644 index 00000000..482075af --- /dev/null +++ b/cmd/traffic_cop/Makefile.am @@ -0,0 +1,43 @@ +# +# Makefile.am for the Enterprise Management module. +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +AM_CPPFLAGS = $(iocore_include_dirs) \ + -I$(top_srcdir)/lib \ + -I$(top_srcdir)/lib/records \ + -I$(top_srcdir)/lib/ts \ + -I$(top_srcdir)/mgmt \ + -I$(top_srcdir)/mgmt/cluster \ + -I$(top_srcdir)/mgmt/api/include + +MGMT_DEFS = @MGMT_DEFS@ +DEFS += $(MGMT_DEFS) + +bin_PROGRAMS = traffic_cop + +traffic_cop_SOURCES = \ + traffic_cop.cc + +traffic_cop_LDFLAGS = @EXTRA_CXX_LDFLAGS@ @LIBTOOL_LINK_FLAGS@ + +traffic_cop_LDADD = \ + $(top_builddir)/mgmt/api/libtsmgmtshare.la \ + $(top_builddir)/mgmt/api/libtsmgmt.la \ + $(top_builddir)/lib/ts/libtsutil.la \ + $(top_builddir)/lib/records/librec4cop.a \ + @LIBRESOLV@ @OPENSSL_LIBS@ diff --git a/cmd/traffic_cop/Makefile.in b/cmd/traffic_cop/Makefile.in new file mode 100644 index 00000000..961a6048 --- /dev/null +++ b/cmd/traffic_cop/Makefile.in @@ -0,0 +1,905 @@ +# Makefile.in generated by automake 1.13.4 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# +# Makefile.am for the Enterprise Management module. +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) ;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgincludedir = $(includedir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = 
$(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +bin_PROGRAMS = traffic_cop$(EXEEXT) +subdir = cmd/traffic_cop +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ + $(top_srcdir)/build/aux/depcomp +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/ax_boost_base.m4 \ + $(top_srcdir)/build/ax_check_openssl.m4 \ + $(top_srcdir)/build/ax_compare_version.m4 \ + $(top_srcdir)/build/ax_compiler_vendor.m4 \ + $(top_srcdir)/build/ax_cxx_compile_stdcxx_11.m4 \ + $(top_srcdir)/build/ax_lib_curl.m4 \ + $(top_srcdir)/build/ax_lib_readline.m4 \ + $(top_srcdir)/build/ax_path_generic.m4 \ + $(top_srcdir)/build/ax_perl_modules.m4 \ + $(top_srcdir)/build/ax_with_curses.m4 \ + $(top_srcdir)/build/common.m4 $(top_srcdir)/build/crypto.m4 \ + $(top_srcdir)/build/jemalloc.m4 $(top_srcdir)/build/libtool.m4 \ + $(top_srcdir)/build/ltoptions.m4 \ + $(top_srcdir)/build/ltsugar.m4 \ + $(top_srcdir)/build/ltversion.m4 \ + $(top_srcdir)/build/lt~obsolete.m4 $(top_srcdir)/build/lzma.m4 \ + $(top_srcdir)/build/network.m4 $(top_srcdir)/build/pcre.m4 \ + $(top_srcdir)/build/pkg.m4 $(top_srcdir)/build/tcl.m4 \ + $(top_srcdir)/build/tcmalloc.m4 $(top_srcdir)/build/xml.m4 \ + $(top_srcdir)/build/zlib.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/lib/ts/ink_autoconf.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +am__installdirs = "$(DESTDIR)$(bindir)" +PROGRAMS = $(bin_PROGRAMS) +am_traffic_cop_OBJECTS = traffic_cop.$(OBJEXT) +traffic_cop_OBJECTS = $(am_traffic_cop_OBJECTS) +traffic_cop_DEPENDENCIES = $(top_builddir)/mgmt/api/libtsmgmtshare.la \ + $(top_builddir)/mgmt/api/libtsmgmt.la \ + 
$(top_builddir)/lib/ts/libtsutil.la \ + $(top_builddir)/lib/records/librec4cop.a +AM_V_lt = $(am__v_lt_@AM_V@) +am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@) +am__v_lt_0 = --silent +am__v_lt_1 = +traffic_cop_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ + $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \ + $(CXXFLAGS) $(traffic_cop_LDFLAGS) $(LDFLAGS) -o $@ +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir)/lib/ts +depcomp = $(SHELL) $(top_srcdir)/build/aux/depcomp +am__depfiles_maybe = depfiles +am__mv = mv -f +CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ + $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) +LTCXXCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ + $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) \ + $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ + $(AM_CXXFLAGS) $(CXXFLAGS) +AM_V_CXX = $(am__v_CXX_@AM_V@) +am__v_CXX_ = $(am__v_CXX_@AM_DEFAULT_V@) +am__v_CXX_0 = @echo " CXX " $@; +am__v_CXX_1 = +CXXLD = $(CXX) +CXXLINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ + $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \ + $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ +AM_V_CXXLD = $(am__v_CXXLD_@AM_V@) +am__v_CXXLD_ = $(am__v_CXXLD_@AM_DEFAULT_V@) +am__v_CXXLD_0 = @echo " CXXLD " $@; +am__v_CXXLD_1 = +SOURCES = $(traffic_cop_SOURCES) +DIST_SOURCES = $(traffic_cop_SOURCES) +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +# Read a list of newline-separated strings from the standard input, +# and print each of them once, 
without duplicates. Input order is +# *not* preserved. +am__uniquify_input = $(AWK) '\ + BEGIN { nonempty = 0; } \ + { items[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in items) print i; }; } \ +' +# Make sure the list of sources is unique. This is necessary because, +# e.g., the same source file might be shared among _SOURCES variables +# for different programs/libraries. +am__define_uniq_tagged_files = \ + list='$(am__tagged_files)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | $(am__uniquify_input)` +ETAGS = etags +CTAGS = ctags +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +pkgdatadir = @pkgdatadir@ +pkglibdir = @pkglibdir@ +pkglibexecdir = @pkglibexecdir@ +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_CFLAGS = @AM_CFLAGS@ +AM_CXXFLAGS = @AM_CXXFLAGS@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +API_DEFS = @API_DEFS@ +AR = @AR@ +ASCPP = @ASCPP@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +BOOST_CPPFLAGS = @BOOST_CPPFLAGS@ +BOOST_LDFLAGS = @BOOST_LDFLAGS@ +CC = @CC@ +CCACHE = @CCACHE@ +CCAS = @CCAS@ +CCASDEPMODE = @CCASDEPMODE@ +CCASFLAGS = @CCASFLAGS@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CURL_CFLAGS = @CURL_CFLAGS@ +CURL_CONFIG = @CURL_CONFIG@ +CURL_LIBS = @CURL_LIBS@ +CURSES_LIB = @CURSES_LIB@ +CXX = @CXX@ +CXXCPP = @CXXCPP@ +CXXDEPMODE = @CXXDEPMODE@ +CXXFLAGS = @CXXFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ $(MGMT_DEFS) +DEPDIR = @DEPDIR@ +DLLTOOL = @DLLTOOL@ +DOXYGEN = @DOXYGEN@ +DSYMUTIL = @DSYMUTIL@ +DUMPBIN = @DUMPBIN@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +EXPAT_LDFLAGS = @EXPAT_LDFLAGS@ +EXTRA_CC_LDFLAGS = @EXTRA_CC_LDFLAGS@ +EXTRA_CXX_LDFLAGS = @EXTRA_CXX_LDFLAGS@ +FGREP = @FGREP@ +FLEX_CFLAGS = @FLEX_CFLAGS@ +GEOIP_LIBS = @GEOIP_LIBS@ +GREP = @GREP@ +HOST_GUESS = @HOST_GUESS@ +HWLOC_CFLAGS = @HWLOC_CFLAGS@ 
+HWLOC_LIBS = @HWLOC_LIBS@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +LD = @LD@ +LDFLAGS = @LDFLAGS@ +LEX = @LEX@ +LEXLIB = @LEXLIB@ +LEX_OUTPUT_ROOT = @LEX_OUTPUT_ROOT@ +LIBCAP = @LIBCAP@ +LIBCRYPT = @LIBCRYPT@ +LIBDEMANGLE = @LIBDEMANGLE@ +LIBEXPAT = @LIBEXPAT@ +LIBLZMA = @LIBLZMA@ +LIBOBJS = @LIBOBJS@ +LIBPCRE = @LIBPCRE@ +LIBPROFILER = @LIBPROFILER@ +LIBREADLINE = @LIBREADLINE@ +LIBRESOLV = @LIBRESOLV@ +LIBS = @LIBS@ +LIBTCL = @LIBTCL@ +LIBTOOL = @LIBTOOL@ +LIBTOOL_LINK_FLAGS = @LIBTOOL_LINK_FLAGS@ +LIBZ = @LIBZ@ +LIPO = @LIPO@ +LN_S = @LN_S@ +LTLIBOBJS = @LTLIBOBJS@ +LUA_LDFLAGS = @LUA_LDFLAGS@ +MAKEINFO = @MAKEINFO@ +MANIFEST_TOOL = @MANIFEST_TOOL@ +MGMT_DEFS = @MGMT_DEFS@ +MKDIR_P = @MKDIR_P@ +NM = @NM@ +NMEDIT = @NMEDIT@ +OBJDUMP = @OBJDUMP@ +OBJEXT = @OBJEXT@ +OPENSSL_INCLUDES = @OPENSSL_INCLUDES@ +OPENSSL_LDFLAGS = @OPENSSL_LDFLAGS@ +OPENSSL_LIBS = @OPENSSL_LIBS@ +OTOOL = @OTOOL@ +OTOOL64 = @OTOOL64@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PCRE_CONFIG = @PCRE_CONFIG@ +PERL = @PERL@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PYTHON = @PYTHON@ +PYTHON_EXEC_PREFIX = @PYTHON_EXEC_PREFIX@ +PYTHON_PLATFORM = @PYTHON_PLATFORM@ +PYTHON_PREFIX = @PYTHON_PREFIX@ +PYTHON_VERSION = @PYTHON_VERSION@ +RANLIB = @RANLIB@ +RM = @RM@ +SED = @SED@ +SET_MAKE = @SET_MAKE@ +SHARED_CFLAGS = @SHARED_CFLAGS@ +SHARED_CXXFLAGS = @SHARED_CXXFLAGS@ +SHARED_CXXLINKFLAGS = @SHARED_CXXLINKFLAGS@ +SHARED_LDFLAGS = @SHARED_LDFLAGS@ +SHELL = @SHELL@ +SPDYLAY_CFLAGS = @SPDYLAY_CFLAGS@ +SPDYLAY_LIBS = @SPDYLAY_LIBS@ +SPHINXBUILD = @SPHINXBUILD@ +SPHINXOPTS = @SPHINXOPTS@ +STRIP 
= @STRIP@ +TCL_BIN_DIR = @TCL_BIN_DIR@ +TCL_LIB_FILE = @TCL_LIB_FILE@ +TCL_LIB_FLAG = @TCL_LIB_FLAG@ +TCL_LIB_SPEC = @TCL_LIB_SPEC@ +TCL_PATCH_LEVEL = @TCL_PATCH_LEVEL@ +TCL_SRC_DIR = @TCL_SRC_DIR@ +TCL_STUB_LIB_FILE = @TCL_STUB_LIB_FILE@ +TCL_STUB_LIB_FLAG = @TCL_STUB_LIB_FLAG@ +TCL_STUB_LIB_SPEC = @TCL_STUB_LIB_SPEC@ +TCL_VERSION = @TCL_VERSION@ +TFW_PACKAGE_SUFFIX = @TFW_PACKAGE_SUFFIX@ +TS_LIBTOOL_VERSION = @TS_LIBTOOL_VERSION@ +TS_LUAJIT_LDFLAGS = @TS_LUAJIT_LDFLAGS@ +TS_MAN1_MANPAGES = @TS_MAN1_MANPAGES@ +TS_MAN3_MANPAGES = @TS_MAN3_MANPAGES@ +TS_MAN5_MANPAGES = @TS_MAN5_MANPAGES@ +TS_MAN8_MANPAGES = @TS_MAN8_MANPAGES@ +TS_VERSION_MAJOR = @TS_VERSION_MAJOR@ +TS_VERSION_MICRO = @TS_VERSION_MICRO@ +TS_VERSION_MINOR = @TS_VERSION_MINOR@ +TS_VERSION_NUMBER = @TS_VERSION_NUMBER@ +TS_VERSION_STRING = @TS_VERSION_STRING@ +VERSION = @VERSION@ +YACC = @YACC@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_AR = @ac_ct_AR@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_CXX = @ac_ct_CXX@ +ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ +ac_cv_sizeof_voidp = @ac_cv_sizeof_voidp@ +allocah = @allocah@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +arpa_ineth = @arpa_ineth@ +arpa_nameser_compath = @arpa_nameser_compath@ +arpa_nameserh = @arpa_nameserh@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_group = @build_group@ +build_machine = @build_machine@ +build_os = @build_os@ +build_person = @build_person@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +cpioh = @cpioh@ +ctypeh = @ctypeh@ +datadir = @datadir@ +datarootdir = @datarootdir@ +default_loopback_iface = @default_loopback_iface@ +defer_accept = @defer_accept@ +docdir = @docdir@ +dvidir = @dvidir@ +enable_remote_cov_commit = @enable_remote_cov_commit@ +endianh = @endianh@ +exec_prefix = 
@exec_prefix@ +execinfoh = @execinfoh@ +exp_bindir = @exp_bindir@ +exp_cachedir = @exp_cachedir@ +exp_datadir = @exp_datadir@ +exp_docdir = @exp_docdir@ +exp_exec_prefix = @exp_exec_prefix@ +exp_includedir = @exp_includedir@ +exp_infodir = @exp_infodir@ +exp_installbuilddir = @exp_installbuilddir@ +exp_libdir = @exp_libdir@ +exp_libexecdir = @exp_libexecdir@ +exp_localstatedir = @exp_localstatedir@ +exp_logdir = @exp_logdir@ +exp_mandir = @exp_mandir@ +exp_prefix = @exp_prefix@ +exp_runtimedir = @exp_runtimedir@ +exp_sbindir = @exp_sbindir@ +exp_sysconfdir = @exp_sysconfdir@ +expath = @expath@ +floath = @floath@ +gethostbyname_r_glibc2 = @gethostbyname_r_glibc2@ +has_128bit_cas = @has_128bit_cas@ +has_backtrace = @has_backtrace@ +has_in6_is_addr_unspecified = @has_in6_is_addr_unspecified@ +has_interim_cache = @has_interim_cache@ +has_ip_tos = @has_ip_tos@ +has_profiler = @has_profiler@ +has_so_mark = @has_so_mark@ +has_spdy = @has_spdy@ +has_tcmalloc = @has_tcmalloc@ +has_tests = @has_tests@ +has_wccp = @has_wccp@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +ifaddrsh = @ifaddrsh@ +includedir = @includedir@ +infodir = @infodir@ +ink_with_modules_local = @ink_with_modules_local@ +ink_with_modules_process = @ink_with_modules_process@ +install_sh = @install_sh@ +installbuilddir = @installbuilddir@ +iocore_include_dirs = @iocore_include_dirs@ +ip_transparent = @ip_transparent@ +jemalloch = @jemalloch@ +libdir = @libdir@ +libexecdir = @libexecdir@ +libgenh = @libgenh@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +lzmah = @lzmah@ +machine_endianh = @machine_endianh@ +malloch = @malloch@ +mandir = @mandir@ +mathh = @mathh@ +max_api_stats = @max_api_stats@ +max_event_threads = @max_event_threads@ +max_host_name_len = @max_host_name_len@ +max_threads_per_type = @max_threads_per_type@ +mkdir_p = @mkdir_p@ +net_ppp_defsh = @net_ppp_defsh@ +netdbh = 
@netdbh@ +netinet_in_systmh = @netinet_in_systmh@ +netinet_inh = @netinet_inh@ +netinet_ip_icmph = @netinet_ip_icmph@ +netinet_iph = @netinet_iph@ +netinet_tcph = @netinet_tcph@ +oldincludedir = @oldincludedir@ +pcre_pcreh = @pcre_pcreh@ +pcreh = @pcreh@ +pdfdir = @pdfdir@ +pkgbindir = @pkgbindir@ +pkgcachedir = @pkgcachedir@ +pkgdocdir = @pkgdocdir@ +pkglocalstatedir = @pkglocalstatedir@ +pkglogdir = @pkglogdir@ +pkgpyexecdir = @pkgpyexecdir@ +pkgpythondir = @pkgpythondir@ +pkgruntimedir = @pkgruntimedir@ +pkgsbindir = @pkgsbindir@ +pkgsysconfdir = @pkgsysconfdir@ +pkgsysgroup = @pkgsysgroup@ +pkgsysuser = @pkgsysuser@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +pthread_nph = @pthread_nph@ +pyexecdir = @pyexecdir@ +pythondir = @pythondir@ +readline_readlineh = @readline_readlineh@ +rel_bindir = @rel_bindir@ +rel_cachedir = @rel_cachedir@ +rel_datadir = @rel_datadir@ +rel_docdir = @rel_docdir@ +rel_exec_prefix = @rel_exec_prefix@ +rel_includedir = @rel_includedir@ +rel_infodir = @rel_infodir@ +rel_installbuilddir = @rel_installbuilddir@ +rel_libdir = @rel_libdir@ +rel_libexecdir = @rel_libexecdir@ +rel_localstatedir = @rel_localstatedir@ +rel_logdir = @rel_logdir@ +rel_mandir = @rel_mandir@ +rel_prefix = @rel_prefix@ +rel_runtimedir = @rel_runtimedir@ +rel_sbindir = @rel_sbindir@ +rel_sysconfdir = @rel_sysconfdir@ +runtimedir = @runtimedir@ +sbindir = @sbindir@ +schedh = @schedh@ +sharedstatedir = @sharedstatedir@ +siginfoh = @siginfoh@ +srcdir = @srcdir@ +stroptsh = @stroptsh@ +sys_byteorderh = @sys_byteorderh@ +sys_cpuseth = @sys_cpuseth@ +sys_epollh = @sys_epollh@ +sys_eventh = @sys_eventh@ +sys_ioctlh = @sys_ioctlh@ +sys_mounth = @sys_mounth@ +sys_paramh = @sys_paramh@ +sys_pseth = @sys_pseth@ +sys_sockioh = @sys_sockioh@ +sys_sysctlh = @sys_sysctlh@ +sys_sysinfoh = @sys_sysinfoh@ +sys_sysmacrosh = @sys_sysmacrosh@ +sys_systeminfoh = @sys_systeminfoh@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ 
+top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +use_diags = @use_diags@ +use_epoll = @use_epoll@ +use_fast_sdk = @use_fast_sdk@ +use_freelist = @use_freelist@ +use_hwloc = @use_hwloc@ +use_kqueue = @use_kqueue@ +use_linux_native_aio = @use_linux_native_aio@ +use_port = @use_port@ +use_posix_cap = @use_posix_cap@ +use_reclaimable_freelist = @use_reclaimable_freelist@ +use_tls_alpn = @use_tls_alpn@ +use_tls_eckey = @use_tls_eckey@ +use_tls_npn = @use_tls_npn@ +use_tls_sni = @use_tls_sni@ +use_tproxy = @use_tproxy@ +valuesh = @valuesh@ +waith = @waith@ +zlibh = @zlibh@ +AM_CPPFLAGS = $(iocore_include_dirs) \ + -I$(top_srcdir)/lib \ + -I$(top_srcdir)/lib/records \ + -I$(top_srcdir)/lib/ts \ + -I$(top_srcdir)/mgmt \ + -I$(top_srcdir)/mgmt/cluster \ + -I$(top_srcdir)/mgmt/api/include + +traffic_cop_SOURCES = \ + traffic_cop.cc + +traffic_cop_LDFLAGS = @EXTRA_CXX_LDFLAGS@ @LIBTOOL_LINK_FLAGS@ +traffic_cop_LDADD = \ + $(top_builddir)/mgmt/api/libtsmgmtshare.la \ + $(top_builddir)/mgmt/api/libtsmgmt.la \ + $(top_builddir)/lib/ts/libtsutil.la \ + $(top_builddir)/lib/records/librec4cop.a \ + @LIBRESOLV@ @OPENSSL_LIBS@ + +all: all-am + +.SUFFIXES: +.SUFFIXES: .cc .lo .o .obj +$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign cmd/traffic_cop/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --foreign cmd/traffic_cop/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +install-binPROGRAMS: $(bin_PROGRAMS) + @$(NORMAL_INSTALL) + @list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(bindir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(bindir)" || exit 1; \ + fi; \ + for p in $$list; do echo "$$p $$p"; done | \ + sed 's/$(EXEEXT)$$//' | \ + while read p p1; do if test -f $$p \ + || test -f $$p1 \ + ; then echo "$$p"; echo "$$p"; else :; fi; \ + done | \ + sed -e 'p;s,.*/,,;n;h' \ + -e 's|.*|.|' \ + -e 'p;x;s,.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/' | \ + sed 'N;N;N;s,\n, ,g' | \ + $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1 } \ + { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ + if ($$2 == $$4) files[d] = files[d] " " $$1; \ + else { print "f", $$3 "/" $$4, $$1; } } \ + END { for (d in files) print "f", d, files[d] }' | \ + while read type dir files; do \ + if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ + test -z "$$files" || { \ + echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files '$(DESTDIR)$(bindir)$$dir'"; \ + $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files "$(DESTDIR)$(bindir)$$dir" || exit $$?; \ + } \ + ; done + +uninstall-binPROGRAMS: + 
@$(NORMAL_UNINSTALL) + @list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \ + files=`for p in $$list; do echo "$$p"; done | \ + sed -e 'h;s,^.*/,,;s/$(EXEEXT)$$//;$(transform)' \ + -e 's/$$/$(EXEEXT)/' \ + `; \ + test -n "$$list" || exit 0; \ + echo " ( cd '$(DESTDIR)$(bindir)' && rm -f" $$files ")"; \ + cd "$(DESTDIR)$(bindir)" && rm -f $$files + +clean-binPROGRAMS: + @list='$(bin_PROGRAMS)'; test -n "$$list" || exit 0; \ + echo " rm -f" $$list; \ + rm -f $$list || exit $$?; \ + test -n "$(EXEEXT)" || exit 0; \ + list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ + echo " rm -f" $$list; \ + rm -f $$list + +traffic_cop$(EXEEXT): $(traffic_cop_OBJECTS) $(traffic_cop_DEPENDENCIES) $(EXTRA_traffic_cop_DEPENDENCIES) + @rm -f traffic_cop$(EXEEXT) + $(AM_V_CXXLD)$(traffic_cop_LINK) $(traffic_cop_OBJECTS) $(traffic_cop_LDADD) $(LIBS) + +mostlyclean-compile: + -rm -f *.$(OBJEXT) + +distclean-compile: + -rm -f *.tab.c + +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/traffic_cop.Po@am__quote@ + +.cc.o: +@am__fastdepCXX_TRUE@ $(AM_V_CXX)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.o$$||'`;\ +@am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\ +@am__fastdepCXX_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Po +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ $< + +.cc.obj: +@am__fastdepCXX_TRUE@ $(AM_V_CXX)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.obj$$||'`;\ +@am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ `$(CYGPATH_W) '$<'` &&\ +@am__fastdepCXX_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Po +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) 
@AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` + +.cc.lo: +@am__fastdepCXX_TRUE@ $(AM_V_CXX)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.lo$$||'`;\ +@am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\ +@am__fastdepCXX_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Plo +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LTCXXCOMPILE) -c -o $@ $< + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs + +ID: $(am__tagged_files) + $(am__define_uniq_tagged_files); mkid -fID $$unique +tags: tags-am +TAGS: tags + +tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + set x; \ + here=`pwd`; \ + $(am__define_uniq_tagged_files); \ + shift; \ + if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + if test $$# -gt 0; then \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + "$$@" $$unique; \ + else \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$unique; \ + fi; \ + fi +ctags: ctags-am + +CTAGS: ctags +ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + $(am__define_uniq_tagged_files); \ + test -z "$(CTAGS_ARGS)$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && $(am__cd) $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) "$$here" +cscopelist: cscopelist-am + +cscopelist-am: $(am__tagged_files) + list='$(am__tagged_files)'; \ + case "$(srcdir)" in \ + [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ + *) sdir=$(subdir)/$(srcdir) ;; \ + esac; \ + for i in $$list; do \ + if test -f "$$i"; then \ + echo "$(subdir)/$$i"; \ + else \ + echo "$$sdir/$$i"; \ + fi; \ + done >> $(top_builddir)/cscope.files + +distclean-tags: + -rm -f TAGS ID 
GTAGS GRTAGS GSYMS GPATH tags + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(PROGRAMS) +installdirs: + for dir in "$(DESTDIR)$(bindir)"; do \ + test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + done +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + 
fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." +clean: clean-am + +clean-am: clean-binPROGRAMS clean-generic clean-libtool mostlyclean-am + +distclean: distclean-am + -rm -rf ./$(DEPDIR) + -rm -f Makefile +distclean-am: clean-am distclean-compile distclean-generic \ + distclean-tags + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: install-binPROGRAMS + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -rf ./$(DEPDIR) + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-compile mostlyclean-generic \ + mostlyclean-libtool + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: uninstall-binPROGRAMS + +.MAKE: install-am install-strip + +.PHONY: CTAGS GTAGS TAGS all all-am check check-am clean \ + clean-binPROGRAMS clean-generic clean-libtool cscopelist-am \ + ctags ctags-am distclean distclean-compile distclean-generic \ + distclean-libtool distclean-tags distdir dvi dvi-am html \ + html-am info info-am install install-am install-binPROGRAMS \ + install-data install-data-am install-dvi install-dvi-am \ + install-exec install-exec-am install-html install-html-am \ + install-info install-info-am install-man install-pdf \ + install-pdf-am install-ps install-ps-am install-strip \ + 
installcheck installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-compile \ + mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ + tags tags-am uninstall uninstall-am uninstall-binPROGRAMS + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/cmd/traffic_cop/traffic_cop.cc b/cmd/traffic_cop/traffic_cop.cc new file mode 100644 index 00000000..50615f24 --- /dev/null +++ b/cmd/traffic_cop/traffic_cop.cc @@ -0,0 +1,1840 @@ +/** @file + + Main entry point for the traffic_cop application. + + @section license License + + Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ */ + +#include "libts.h" +#include "I_Layout.h" +#include "I_Version.h" +#include "I_RecCore.h" +#include "mgmtapi.h" +#include "ClusterCom.h" + +#include +#include + +#if defined(linux) || defined (solaris) +#include "sys/utsname.h" +#include +#include +#include + +union semun +{ + int val; /* value for SETVAL */ + struct semid_ds *buf; /* buffer for IPC_STAT, IPC_SET */ + unsigned short int *array; /* array for GETALL, SETALL */ + struct seminfo *__buf; /* buffer for IPC_INFO */ +}; +#endif // linux check +#include + +static const long MAX_LOGIN = sysconf(_SC_LOGIN_NAME_MAX) <= 0 ? _POSIX_LOGIN_NAME_MAX : sysconf(_SC_LOGIN_NAME_MAX); + +#define OPTIONS_MAX 32 +#define OPTIONS_LEN_MAX 1024 + +#ifndef WAIT_ANY +#define WAIT_ANY (pid_t) -1 +#endif // !WAIT_ANY + +#define COP_FATAL LOG_ALERT +#define COP_WARNING LOG_ERR +#define COP_DEBUG LOG_DEBUG +#define COP_NOTICE LOG_NOTICE + +static const char *runtime_dir; +static char config_file[PATH_NAME_MAX]; + +static char cop_lockfile[PATH_NAME_MAX]; +static char manager_lockfile[PATH_NAME_MAX]; +static char server_lockfile[PATH_NAME_MAX]; + +static int check_memory_min_swapfree_kb = 0; +static int check_memory_min_memfree_kb = 0; + +static int syslog_facility = LOG_DAEMON; +static char syslog_fac_str[PATH_NAME_MAX] = "LOG_DAEMON"; + +static int killsig = SIGKILL; +static int coresig = 0; + +static int debug_flag = false; +static int stdout_flag = false; +static int version_flag = false; +static int stop_flag = false; + +static char* admin_user; +static uid_t admin_uid; +static gid_t admin_gid; +static bool admin_user_p = false; +static char manager_binary[PATH_NAME_MAX] = "traffic_manager"; +static char server_binary[PATH_NAME_MAX] = "traffic_server"; +static char manager_options[OPTIONS_LEN_MAX] = ""; + +static char log_file[PATH_NAME_MAX] = "traffic.out"; +static char bin_path[PATH_NAME_MAX] = "bin"; + +static int autoconf_port = 8083; +static int rs_port = 8088; +static MgmtClusterType cluster_type = NO_CLUSTER; 
+static int http_backdoor_port = 8084; + +#if defined(linux) +// TS-1075 : auto-port ::connect DoS on high traffic linux systems +static int source_port = 0; +#endif + +static int manager_failures = 0; +static int server_failures = 0; +static int server_not_found = 0; + +static const int sleep_time = 10; // 10 sec +static const int manager_timeout = 3 * 60; // 3 min +static const int server_timeout = 3 * 60; // 3 min + +// traffic_manager flap detection +#define MANAGER_FLAP_DETECTION 1 +#if defined(MANAGER_FLAP_DETECTION) +#define MANAGER_MAX_FLAP_COUNT 3 // if flap this many times, give up for a while +#define MANAGER_FLAP_INTERVAL_MSEC 60000 // if x number of flaps happen in this interval, declare flapping +#define MANAGER_FLAP_RETRY_MSEC 60000 // if flapping, don't try to restart until after this retry duration +static bool manager_flapping = false; // is the manager flapping? +static int manager_flap_count = 0; // how many times has the manager flapped? +static ink_hrtime manager_flap_interval_start_time = 0; // first time we attempted to start the manager in past little while) +static ink_hrtime manager_flap_retry_start_time = 0; // first time we attempted to start the manager in past little while) +#endif + +// transient syscall error timeout +#define TRANSIENT_ERROR_WAIT_MS 500 + +static const int kill_timeout = 1 * 60; // 1 min + +static int child_pid = 0; +static int child_status = 0; +static int sem_id = 11452; + +AppVersionInfo appVersionInfo; + +static char const localhost[] = "127.0.0.1"; + +static void cop_log(int priority, const char *format, ...) TS_PRINTFLIKE(2, 3); + +static void get_admin_user(void); + +struct ConfigValue +{ + ConfigValue(RecT _t, RecDataT _d, const std::string& _v) + : config_type(_t), data_type(_d), data_value(_v) { + } + + RecT config_type; + RecDataT data_type; + std::string data_value; +}; + +typedef std::map ConfigValueTable; +static ConfigValueTable configTable; + +#define cop_log_trace(...) 
do { if (debug_flag) cop_log(COP_DEBUG, __VA_ARGS__); } while (0) + +static const char * +priority_name(int priority) +{ + switch (priority) { + case COP_DEBUG: return "DEBUG"; + case COP_WARNING: return "WARNING"; + case COP_FATAL: return "FATAL"; + case COP_NOTICE: return "NOTICE"; + default: return "unknown"; + } +} + +static void +cop_log(int priority, const char *format, ...) +{ + va_list args; + + va_start(args, format); + + if (stdout_flag) { + struct timeval now; + double now_f; + + gettimeofday(&now, NULL); + now_f = now.tv_sec + now.tv_usec / 1000000.0f; + + fprintf(stdout, "<%.4f> [%s]: ", now_f, priority_name(priority)); + vfprintf(stdout, format, args); + fflush(stdout); + } else { + char buffer[8192]; + vsprintf(buffer, format, args); + syslog(priority, "%s", buffer); + } + + va_end(args); +} + + +void +chown_file_to_admin_user(const char *file) { + if (admin_user_p) { + if (chown(file, admin_uid, admin_gid) < 0 && errno != ENOENT) { + cop_log(COP_FATAL, "cop couldn't chown the file: '%s' for '%s' (%d/%d) : [%d] %s\n", + file, admin_user, admin_uid, admin_gid, errno, strerror(errno)); + } + } +} + +static void +sig_child(int signum) +{ + pid_t pid = 0; + int status = 0; + + cop_log_trace("Entering sig_child(%d)\n", signum); + for (;;) { + pid = waitpid(WAIT_ANY, &status, WNOHANG); + + if (pid <= 0) { + break; + } + // TSqa03086 - We can not log the child status signal from + // the signal handler since syslog can deadlock. Record + // the pid and the status in a global for logging + // next time through the event loop. We will occasionally + // lose some information if we get two sig childs in rapid + // succession + child_pid = pid; + child_status = status; + } + cop_log_trace("Leaving sig_child(%d)\n", signum); +} + +static void +sig_term(int signum) +{ + pid_t pid = 0; + int status = 0; + + //killsig = SIGTERM; + + cop_log_trace("Entering sig_term(%d)\n", signum); + + // safely^W commit suicide. 
+ cop_log_trace("Sending signal %d to entire group\n", signum); + killpg(0, signum); + + cop_log_trace("Waiting for children to exit."); + + for (;;) { + pid = waitpid(WAIT_ANY, &status, WNOHANG); + + if (pid <= 0) { + break; + } + // TSqa03086 - We can not log the child status signal from + // the signal handler since syslog can deadlock. Record + // the pid and the status in a global for logging + // next time through the event loop. We will occasionally + // lose some information if we get two sig childs in rapid + // succession + child_pid = pid; + child_status = status; + } + cop_log_trace("Leaving sig_term(%d), exiting traffic_cop\n", signum); + exit(0); +} + +static void +#if defined(solaris) +sig_fatal(int signum, siginfo_t * t, void *c) +#else +sig_fatal(int signum) +#endif +{ + cop_log_trace("Entering sig_fatal(%d)\n", signum); +#if defined(solaris) + if (t) { + if (t->si_code <= 0) { + cop_log(COP_FATAL, "cop received fatal user signal [%d] from" + " pid [%d] uid [%d]\n", signum, (int)(t->si_pid), t->si_uid); + } else { + cop_log(COP_FATAL, "cop received fatal kernel signal [%d], " "reason [%d]\n", signum, t->si_code); + } + } else { +#endif + cop_log(COP_FATAL, "cop received fatal signal [%d]\n", signum); +#if defined(solaris) + } +#endif + cop_log_trace("Leaving sig_fatal(%d)\n", signum); + abort(); +} + +static void +#if defined(solaris) +sig_alarm_warn(int signum, siginfo_t * t, void *c) +#else +sig_alarm_warn(int signum) +#endif +{ + cop_log_trace("Entering sig_alarm_warn(%d)\n", signum); + cop_log(COP_WARNING, "unable to kill traffic_server for the last" " %d seconds\n", kill_timeout); + + // Set us up for another alarm + alarm(kill_timeout); + cop_log_trace("Leaving sig_alarm_warn(%d)\n", signum); +} + +static void +sig_ignore(int signum) +{ + cop_log_trace("Entering sig_ignore(%d)\n", signum); + // No code here yet... 
+ cop_log_trace("Leaving sig_ignore(%d)\n", signum); +} + +static void +set_alarm_death() +{ + struct sigaction action; + + cop_log_trace("Entering set_alarm_death()\n"); +#if defined(solaris) + action.sa_handler = NULL; + action.sa_sigaction = sig_fatal; + sigemptyset(&action.sa_mask); + action.sa_flags = SA_SIGINFO; +#else + action.sa_handler = sig_fatal; + sigemptyset(&action.sa_mask); + action.sa_flags = 0; +#endif + + sigaction(SIGALRM, &action, NULL); + cop_log_trace("Leaving set_alarm_death()\n"); +} + +static void +set_alarm_warn() +{ + struct sigaction action; + + cop_log_trace("Entering set_alarm_warn()\n"); +#if defined(solaris) + action.sa_handler = NULL; + action.sa_sigaction = sig_alarm_warn; + sigemptyset(&action.sa_mask); + action.sa_flags = SA_SIGINFO; +#else + action.sa_handler = sig_alarm_warn; + sigemptyset(&action.sa_mask); + action.sa_flags = 0; +#endif + + sigaction(SIGALRM, &action, NULL); + cop_log_trace("Leaving set_alarm_warn()\n"); + +} + +static void +process_syslog_config(void) +{ + int new_fac; + + cop_log_trace("Entering process_syslog_config()\n"); + new_fac = facility_string_to_int(syslog_fac_str); + + if (new_fac >= 0 && new_fac != syslog_facility) { + closelog(); + openlog("traffic_cop", LOG_PID | LOG_NDELAY | LOG_NOWAIT, new_fac); + syslog_facility = new_fac; + } + cop_log_trace("Leaving process_syslog_config()\n"); +} + +// Paranoia: wrap the process termination call within alarms +// so that when the killing call doesn't return we +// will still wake up +static void +safe_kill(const char *lockfile_name, const char *pname, bool group) +{ + Lockfile lockfile(lockfile_name); + chown_file_to_admin_user(lockfile_name); + + cop_log_trace("Entering safe_kill(%s, %s, %d)\n", lockfile_name, pname, group); + set_alarm_warn(); + alarm(kill_timeout); + + if (group == true) { + lockfile.KillGroup(killsig, coresig, pname); + } else { + lockfile.Kill(killsig, coresig, pname); + } + chown_file_to_admin_user(lockfile_name); + + alarm(0); + 
set_alarm_death(); + cop_log_trace("Leaving safe_kill(%s, %s, %d)\n", lockfile_name, pname, group); +} + + +// ink_hrtime milliseconds() +// +// Returns the result of gettimeofday converted to +// one 64bit int +// +static ink_hrtime +milliseconds(void) +{ + struct timeval curTime; + + cop_log_trace("Entering milliseconds()\n"); + ink_gethrtimeofday(&curTime, NULL); + // Make liberal use of casting to ink_hrtime to ensure the + // compiler does not truncate our result + cop_log_trace("Leaving milliseconds()\n"); + return ((ink_hrtime) curTime.tv_sec * 1000) + ((ink_hrtime) curTime.tv_usec / 1000); +} + +static void +millisleep(int ms) +{ + struct timespec ts; + + cop_log_trace("Entering millisleep(%d)\n", ms); + ts.tv_sec = ms / 1000; + ts.tv_nsec = (ms - ts.tv_sec * 1000) * 1000 * 1000; + nanosleep(&ts, NULL); + cop_log_trace("Leaving millisleep(%d)\n", ms); +} + +static bool +transient_error(int error, int wait_ms) +{ + cop_log_trace("Entering transient_error(%d, %d)\n", error, wait_ms); + + // switch cases originally from UnixNex::accept_error_seriousness() + switch (error) { + case EAGAIN: + case EINTR: + break; + + case ENFILE: + case EMFILE: + case ENOMEM: +#ifdef ENOBUFS + case ENOBUFS: +#endif +#if defined(ENOSR) && !defined(freebsd) && !defined(darwin) + case ENOSR: +#endif + if (wait_ms) + millisleep(wait_ms); + break; + + default: + cop_log_trace("Leaving transient_error(%d, %d) --> false\n", error, wait_ms); + return false; + } + cop_log_trace("Leaving transient_error(%d, %d) --> true\n", error, wait_ms); + return true; +} + +static void +config_register_variable(RecT rec_type, RecDataT data_type, const char * name, const char * value, bool /* inc_version */) +{ + configTable.insert(std::make_pair(std::string(name), ConfigValue(rec_type, data_type, value))); +} + +static void +config_read_string(const char *name, char *val, size_t val_len, bool miss_ok = false) +{ + ConfigValueTable::const_iterator config; + + config = configTable.find(name); + if 
(config == configTable.end()) { + if (miss_ok) + return; + else + goto ConfigStrFatalError; + } + + if (config->second.data_type != RECD_STRING) { + goto ConfigStrFatalError; + } + + ink_strlcpy(val, RecConfigOverrideFromEnvironment(name, config->second.data_value.c_str()), val_len); + return; + +ConfigStrFatalError: + cop_log(COP_FATAL, "could not find string variable %s in records.config\n", name); + exit(1); +} + +static void +config_read_int(const char *name, int *val, bool miss_ok = false) +{ + ConfigValueTable::const_iterator config; + + config = configTable.find(name); + if (config == configTable.end()) { + if (miss_ok) + return; + else + goto ConfigIntFatalError; + } + + if (config->second.data_type != RECD_INT) { + goto ConfigIntFatalError; + } + + *val = atoi(RecConfigOverrideFromEnvironment(name, config->second.data_value.c_str())); + return; + +ConfigIntFatalError: + cop_log(COP_FATAL, "could not find integer variable %s in records.config\n", name); + exit(1); +} + +static const char * +config_read_runtime_dir() +{ + char state_dir[PATH_NAME_MAX + 1]; + + state_dir[0] = '\0'; + config_read_string("proxy.config.local_state_dir", state_dir, sizeof(state_dir), true); + if (strlen(state_dir) > 0) { + return Layout::get()->relative(state_dir); + } else { + return ats_strdup(Layout::get()->runtimedir); + } +} + +static void +config_reload_records() +{ + struct stat stat_buf; + static time_t last_mod = 0; + char log_dir[PATH_NAME_MAX]; + char log_filename[PATH_NAME_MAX]; + int tmp_int; + + cop_log_trace("Entering %s()\n", __func__); + // coverity[fs_check_call] + if (stat(config_file, &stat_buf) == -1) { + cop_log(COP_FATAL, "could not stat \"%s\"\n", config_file); + exit(1); + } + + if (stat_buf.st_mtime <= last_mod) { // no change, no need to re-read + return; + } else { + last_mod = stat_buf.st_mtime; + } + + configTable.clear(); + + if (RecConfigFileParse(config_file, config_register_variable, false) != REC_ERR_OKAY) { + cop_log(COP_FATAL, "could not parse 
\"%s\"\n", config_file); + exit(1); + } + + config_read_string("proxy.config.manager_binary", manager_binary, sizeof(manager_binary), true); + config_read_string("proxy.config.proxy_binary", server_binary, sizeof(server_binary), true); + get_admin_user(); + + config_read_string("proxy.config.bin_path", bin_path, sizeof(bin_path), true); + Layout::get()->relative(bin_path, sizeof(bin_path), bin_path); + if (access(bin_path, R_OK) == -1) { + ink_strlcpy(bin_path, Layout::get()->bindir, sizeof(bin_path)); + if (access(bin_path, R_OK) == -1) { + cop_log(COP_FATAL, "could not access() \"%s\"\n", bin_path); + cop_log(COP_FATAL, "please set 'proxy.config.bin_path' \n"); + exit(1); + } + } + config_read_string("proxy.config.log.logfile_dir", log_dir, sizeof(log_dir)); + Layout::get()->relative(log_dir, sizeof(log_dir), log_dir); + if (access(log_dir, W_OK) == -1) { + ink_strlcpy(log_dir, Layout::get()->logdir, sizeof(log_dir)); + if (access(log_dir, W_OK) == -1) { + cop_log(COP_FATAL, "could not access() \"%s\"\n", log_dir); + cop_log(COP_FATAL, "please set 'proxy.config.log.logfile_dir' \n"); + exit(1); + } + } + config_read_string("proxy.config.output.logfile", log_filename, sizeof(log_filename)); + Layout::relative_to(log_file, sizeof(log_file), log_dir, log_filename); + config_read_int("proxy.config.process_manager.mgmt_port", &http_backdoor_port, true); + config_read_int("proxy.config.admin.autoconf_port", &autoconf_port, true); + config_read_int("proxy.config.cluster.rsport", &rs_port, true); + config_read_int("proxy.config.lm.sem_id", &sem_id, true); + +#if defined(linux) + // TS-1075 : auto-port ::connect DoS on high traffic linux systems + config_read_int("proxy.config.cop.source_port", &source_port, true); +#endif + + config_read_int("proxy.local.cluster.type", &tmp_int); + cluster_type = static_cast(tmp_int); + + if (stdout_flag) { + config_read_string("proxy.config.syslog_facility", syslog_fac_str, sizeof(syslog_fac_str), true); + process_syslog_config(); + } + 
+ config_read_int("proxy.config.cop.core_signal", &coresig, true); + + config_read_int("proxy.config.cop.linux_min_swapfree_kb", &check_memory_min_swapfree_kb, true); + config_read_int("proxy.config.cop.linux_min_memfree_kb", &check_memory_min_memfree_kb, true); + + cop_log_trace("Leaving %s()\n", __func__); +} + +static void +get_admin_user() +{ + struct passwd *pwd = NULL; + + if (!admin_user) + admin_user = (char *)ats_malloc(MAX_LOGIN); + + config_read_string("proxy.config.admin.user_id", admin_user, MAX_LOGIN); + + if (*admin_user) { + char *end = admin_user + strlen(admin_user) - 1; + + // Trim trailing spaces. + while (end >= admin_user && isspace(*end)) + end--; + *(end + 1) = '\0'; + + if (*admin_user == '#') { + int uid = atoi(admin_user + 1); + if (uid == -1) { + // XXX: Can this call hapen after setuid? + uid = (int)geteuid(); + } + pwd = getpwuid((uid_t)uid); + } else { + pwd = getpwnam(admin_user); + } + + if (pwd) { + admin_uid = pwd->pw_uid; + admin_gid = pwd->pw_gid; + admin_user_p = true; + } else { + cop_log(COP_FATAL, "can't get passwd entry for the admin user '%s' - [%d] %s\n", admin_user, errno, strerror(errno)); + exit(1); + } + } +} + +static void +spawn_manager() +{ + char prog[PATH_NAME_MAX]; + char *options[OPTIONS_MAX]; + char *last; + char *tok; + int log_fd; + int err; + int key; + + cop_log_trace("Entering spawn_manager()\n"); + // Clean up shared memory segments. 
+ if (sem_id > 0) { + key = sem_id; + } else { + key = 11452; + } + for (;; key++) { + err = semget(key, 1, 0666); + if (err < 0) { + break; + } +#if defined(solaris) || defined(kfreebsd) || defined(unknown) + err = semctl(err, 1, IPC_RMID); +#else + union semun dummy_semun; + memset(&dummy_semun, 0, sizeof(dummy_semun)); + err = semctl(err, 1, IPC_RMID, dummy_semun); +#endif + if (err < 0) { + break; + } + } + + Layout::relative_to(prog, sizeof(prog), bin_path, manager_binary); + if (access(prog, R_OK | X_OK) == -1) { + cop_log(COP_FATAL, "unable to access() manager binary \"%s\" [%d '%s']\n", prog, errno, strerror(errno)); + exit(1); + } + + cop_log_trace("spawn_manager: Launching %s with options '%s'\n", prog, manager_options); + int i; + for (i = 0; i < OPTIONS_MAX; i++) { + options[i] = NULL; + } + options[0] = prog; + i = 1; + tok = strtok_r(manager_options, " ", &last); + options[i++] = tok; + if (tok != NULL) { + while (i < OPTIONS_MAX && (tok = strtok_r(NULL, " ", &last))) { + options[i++] = tok; + } + } + + // Move any traffic.out that we can not write to, out + // of the way (TSqa2232) + // coverity[fs_check_call] + if (access(log_file, W_OK) < 0 && errno == EACCES) { + char old_log_file[PATH_NAME_MAX]; + snprintf(old_log_file, sizeof(old_log_file), "%s.old", log_file); + // coverity[toctou] + rename(log_file, old_log_file); + cop_log(COP_WARNING, "rename %s to %s as it is not accessible.\n", log_file, old_log_file); + } + // coverity[toctou] + if ((log_fd = open(log_file, O_WRONLY | O_APPEND | O_CREAT, 0640)) < 0) { + cop_log(COP_WARNING, "unable to open log file \"%s\" [%d '%s']\n", log_file, errno, strerror(errno)); + } + + err = fork(); + if (err == 0) { + if (log_fd >= 0) { + dup2(log_fd, STDOUT_FILENO); + dup2(log_fd, STDERR_FILENO); + close(log_fd); + } + + err = execv(prog, options); + cop_log_trace("Somehow execv(%s, options, NULL) failed (%d)!\n", prog, err); + exit(1); + } else if (err == -1) { + cop_log(COP_FATAL, "unable to fork [%d 
'%s']\n", errno, strerror(errno)); + exit(1); + } + + if (log_fd >= 0) { + close(log_fd); + } + + manager_failures = 0; + cop_log_trace("Leaving spawn_manager()\n"); +} + + +static int +poll_read_or_write(int fd, int timeout, int inorout) +{ + struct pollfd info; + int err; + + info.fd = fd; + info.events = inorout; + info.revents = 0; + + do { + err = poll(&info, 1, timeout); + } while ((err < 0) && (transient_error(errno, TRANSIENT_ERROR_WAIT_MS))); + + if ((err > 0) && (info.revents & inorout)) { + return 1; + } + + return err; +} + +inline static int +poll_read(int fd, int timeout) +{ + return poll_read_or_write(fd, timeout, POLLIN); +} + +inline static int +poll_write(int fd, int timeout) +{ + return poll_read_or_write(fd, timeout, POLLOUT); +} + +static int +open_socket(int port, const char *ip = NULL, char const *ip_to_bind = NULL) +{ + + int sock = 0; + struct addrinfo hints; + struct addrinfo *result = NULL; + struct addrinfo *result_to_bind = NULL; + char port_str[8] = {'\0'}; + int err = 0; + + cop_log_trace("Entering open_socket(%d, %s, %s)\n", port, ip, ip_to_bind); + if (!ip) { + ip = localhost; + } + +#if defined(linux) + // TS-1075 : auto-port ::connect DoS on high traffic linux systems + // unbound connections are "unsafe" in high connection count environments + if (!ip_to_bind) { + ip = localhost; + } +#endif + + snprintf(port_str, sizeof(port_str), "%d", port); + memset(&hints, 0, sizeof(hints)); + hints.ai_family = AF_UNSPEC; + hints.ai_socktype = SOCK_STREAM; + + err = getaddrinfo(ip, port_str, &hints, &result); + if (err != 0) { + cop_log (COP_WARNING, "(test) unable to get address info [%d %s] at ip %s, port %s\n", err, gai_strerror(err), ip, port_str); + goto getaddrinfo_error; + } + + // Create a socket + do { + sock = socket(result->ai_family, result->ai_socktype, 0); + } while ((sock < 0) && (transient_error(errno, TRANSIENT_ERROR_WAIT_MS))); + + if (sock < 0) { + cop_log(COP_WARNING, "(test) unable to create socket [%d '%s']\n", errno, 
strerror(errno)); + goto error; + } + + if (ip_to_bind) { + memset(&hints, 0, sizeof(hints)); + hints.ai_family = result->ai_family; + hints.ai_socktype = result->ai_socktype; + + err = getaddrinfo(ip_to_bind, NULL, &hints, &result_to_bind); + if (err != 0) { + cop_log (COP_WARNING, "(test) unable to get address info [%d %s] at ip %s\n", err, gai_strerror(err), ip_to_bind); + freeaddrinfo(result_to_bind); + goto error; + } + +#if defined(linux) + // TS-1075 : auto-port ::connect DoS on high traffic linux systems + // Bash the port on ::bind so that we always use the same port + if (0 != source_port) { + if (result_to_bind->ai_addr->sa_family == AF_INET) { + ((sockaddr_in *)result_to_bind->ai_addr)->sin_port = htons(source_port); + } else { + ((sockaddr_in6 *)result_to_bind->ai_addr)->sin6_port = htons(source_port); + } + + // also set REUSEADDR so that previous cop connections in the TIME_WAIT state + // do not interfere + if (safe_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, SOCKOPT_ON, sizeof(int)) < 0) { + cop_log (COP_WARNING, "(test) unable to set REUSEADDR socket option [%d '%s']\n", errno, strerror (errno)); + } + } +#endif + + if (safe_bind(sock, result_to_bind->ai_addr, result_to_bind->ai_addrlen) < 0) { + cop_log (COP_WARNING, "(test) unable to bind socket [%d '%s']\n", errno, strerror (errno)); + } + + freeaddrinfo(result_to_bind); + } + + // Put the socket in non-blocking mode...just to be extra careful + // that we never block. + do { + err = fcntl(sock, F_SETFL, O_NONBLOCK); + } while ((err < 0) && (transient_error(errno, TRANSIENT_ERROR_WAIT_MS))); + + if (err < 0) { + cop_log(COP_WARNING, "(test) unable to put socket in non-blocking mode [%d '%s']\n", errno, strerror(errno)); + goto error; + } + // Connect to the specified port on the machine we're running on. 
+ do { + err = connect(sock, result->ai_addr, result->ai_addrlen); + } while ((err < 0) && (transient_error(errno, TRANSIENT_ERROR_WAIT_MS))); + + if ((err < 0) && (errno != EINPROGRESS)) { + cop_log(COP_WARNING, "(test) unable to connect to server [%d '%s'] at port %d\n", errno, strerror(errno), port); + goto error; + } + cop_log_trace("Leaving open_socket(%d, %s, %s) --> %d\n", port, ip, ip_to_bind, sock); + freeaddrinfo(result); + return sock; + +error: + if (sock >= 0) { + close_socket(sock); + } + cop_log_trace("Leaving open_socket(%d, %s, %s) --> %d\n", port, ip, ip_to_bind, -1); +getaddrinfo_error: + freeaddrinfo(result); + return -1; +} + +static int +test_port(int port, const char *request, char *buffer, int bufsize, + int64_t test_timeout, char const *ip = NULL, char const *ip_to_bind = NULL) +{ + int64_t start_time, timeout; + int sock; + size_t length = strlen(request); + int64_t err, idx; + + start_time = milliseconds(); + + sock = open_socket(port, ip, ip_to_bind); + if (sock < 0) { + goto error; + } + + timeout = milliseconds() - start_time; + if (timeout >= test_timeout) { + cop_log(COP_WARNING, "(test) timeout occurred [%" PRId64 " ms]\n", timeout); + goto error; + } + timeout = test_timeout - timeout; + + err = poll_write(sock, timeout); + if (err < 0) { + cop_log(COP_WARNING, "(test) poll write failed [%d '%s']\n", errno, strerror(errno)); + goto error; + } else if (err == 0) { + cop_log(COP_WARNING, "(test) write timeout [%" PRId64 " ms]\n", timeout); + goto error; + } + // Write the request to the server. 
+ while (length > 0) { + do { + err = write(sock, request, length); + } while ((err < 0) && (transient_error(errno, TRANSIENT_ERROR_WAIT_MS))); + + if (err < 0) { + cop_log(COP_WARNING, "(test) write failed [%d '%s']\n", errno, strerror(errno)); + goto error; + } + + request += err; + length -= err; + } + + idx = 0; + for (;;) { + if (idx >= bufsize) { + cop_log(COP_WARNING, "(test) response is too large [%" PRId64 "]\n", idx); + goto error; + } + + timeout = milliseconds() - start_time; + if (timeout >= test_timeout) { + cop_log(COP_WARNING, "(test) timeout occurred [%" PRId64 " ms]\n", timeout); + goto error; + } + timeout = test_timeout - timeout; + + err = poll_read(sock, timeout); + if (err < 0) { + cop_log(COP_WARNING, "(test) poll read failed [%d '%s']\n", errno, strerror(errno)); + goto error; + } else if (err == 0) { + cop_log(COP_WARNING, "(test) read timeout [%" PRId64 " ]\n", timeout); + goto error; + } + + do { + err = read(sock, &buffer[idx], bufsize - idx); + } while ((err < 0) && (transient_error(errno, TRANSIENT_ERROR_WAIT_MS))); + + if (err < 0) { + cop_log(COP_WARNING, "(test) read failed [%d '%s']\n", errno, strerror(errno)); + goto error; + } else if (err == 0) { + buffer[idx] = '\0'; + close(sock); + return 0; + } else { + idx += err; + } + } + +error: + if (sock >= 0) { + close_socket(sock); + } + return -1; +} + +static int +read_manager_string(const char *variable, char *value, size_t val_len) +{ + char buffer[4096]; + char request[1024]; + char *p, *e; + int err; + + snprintf(request, sizeof(request), "read %s\n", variable); + + err = test_port(rs_port, request, buffer, 4095, manager_timeout * 1000); + if (err < 0) { + return err; + } + + p = strstr(buffer, variable); + if (!p) { + cop_log(COP_WARNING, "(manager test) could not find record name in response\n"); + return -1; + } + p += strlen(variable); + + p = strstr(p, "Val:"); + if (!p) { + cop_log(COP_WARNING, "(manager test) could not find record value in response\n"); + return -1; + } 
+ p += sizeof("Val:") - 1; + + while (*p && (*p != '\'')) { + p += 1; + } + + if (*p == '\0') { + cop_log(COP_WARNING, "(manager test) could not find properly delimited value in response\n"); + return -1; + } + p += 1; + + e = p; + while (*e && (*e != '\'')) { + e += 1; + } + + if (*e != '\'') { + cop_log(COP_WARNING, "(manager test) could not find properly delimited value in response\n"); + return -1; + } + + ink_strlcpy(value, p, MIN((size_t)(e - p + 1), val_len)); + + return 0; +} + +static int +read_manager_int(const char *variable, int *value) +{ + char buffer[4096]; + char request[1024]; + char *p; + int err; + + snprintf(request, sizeof(request), "read %s\n", variable); + + err = test_port(rs_port, request, buffer, 4095, manager_timeout * 1000); + if (err < 0) { + return err; + } + + p = strstr(buffer, variable); + if (!p) { + cop_log(COP_WARNING, "(manager test) could not find record name in response\n"); + return -1; + } + p += strlen(variable); + + p = strstr(p, "Val:"); + if (!p) { + cop_log(COP_WARNING, "(manager test) could not find record value in response\n"); + return -1; + } + p += sizeof("Val:") - 1; + + while (*p && (*p != '\'')) { + p += 1; + } + + if (*p == '\0') { + cop_log(COP_WARNING, "(manager test) could not find properly delimited value in response\n"); + return -1; + } + p += 1; + + *value = 0; + while (isdigit(*p)) { + *value = *value * 10 + (*p - '0'); + p += 1; + } + + if (*p != '\'') { + cop_log(COP_WARNING, "(manager test) could not find properly delimited value in response\n"); + return -1; + } + return 0; +} + +static int +read_mgmt_cli_int(const char *variable, int *value) +{ + TSInt val; + + if (TSRecordGetInt(variable, &val) != TS_ERR_OKAY) { + cop_log(COP_WARNING, "(cli test) could not communicate with mgmt cli\n"); + return -1; + } + *value = val; + return 0; +} + + +static int +test_rs_port() +{ + char buffer[4096]; + int err; + + err = read_manager_string("proxy.config.manager_binary", buffer, sizeof(buffer)); + if (err < 
0) { + return err; + } + + if (strcmp(buffer, manager_binary) != 0) { + cop_log(COP_WARNING, "(manager test) bad response value\n"); + return -1; + } + + return 0; +} + + +static int +test_mgmt_cli_port() +{ + TSString val = NULL; + int ret = 0; + + if (TSRecordGetString("proxy.config.manager_binary", &val) != TS_ERR_OKAY) { + cop_log(COP_WARNING, "(cli test) unable to retrieve manager_binary\n"); + ret = -1; + } else { + if (strcmp(val, manager_binary) != 0) { + cop_log(COP_WARNING, "(cli test) bad response value, got %s, expected %s\n", val, manager_binary); + ret = -1; + } + } + + if (val) + TSfree(val); + return ret; +} + + +static int +test_http_port(int port, char *request, int timeout, char const *ip = NULL, char const *ip_to_bind = NULL) +{ + char buffer[4096]; + char *p; + int err; + + err = test_port(port, request, buffer, 4095, timeout, ip, ip_to_bind); + if (err < 0) { + return err; + } + + p = buffer; + + if (strncmp(p, "HTTP/", 5) != 0) { + cop_log(COP_WARNING, "(http test) received malformed response\n"); + return -1; + } + + p += 5; + while (*p && !isspace(*p)) { + p += 1; + } + + while (*p && isspace(*p)) { + p += 1; + } + + if (strncmp(p, "200", 3) != 0) { + char pstatus[4] = { 0 }; + ink_strlcpy(pstatus, p, sizeof(pstatus)); + cop_log(COP_WARNING, "(http test) received non-200 status(%s)\n", pstatus); + return -1; + } + + p = strstr(p, "\r\n\r\n"); + if (!p) { + cop_log(COP_WARNING, "(http test) could not find end of header\n"); + return -1; + } + + p += 4; + while (*p) { + if (strncmp(p, "abcdefghijklmnopqrstuvwxyz", 26) != 0) { + cop_log(COP_WARNING, "(http test) corrupted response data\n"); + return -1; + } + + p += 26; + while (*p && (*p != '\n')) { + p += 1; + } + p += 1; + } + + return 0; +} + +static int +test_server_http_port() +{ + char request[1024] = {'\0'}; + + // Generate a request for a the 'synthetic.txt' document the manager + // servers up on the autoconf port. 
+ snprintf(request, sizeof(request), "GET http://127.0.0.1:%d/synthetic.txt HTTP/1.0\r\n\r\n", autoconf_port); + + return test_http_port(http_backdoor_port, request, server_timeout * 1000, localhost, localhost); +} + +static int +heartbeat_manager() +{ + int err; + + cop_log_trace("Entering heartbeat_manager()\n"); + // the CLI, and the rsport if cluster is enabled. + err = test_mgmt_cli_port(); + if ((0 == err) && (cluster_type != NO_CLUSTER)) + err = test_rs_port(); + + if (err < 0) { + // See heartbeat_server()'s comments for how we determine a server/manager failure. + manager_failures += 1; + cop_log(COP_WARNING, "manager heartbeat [variable] failed [%d]\n", manager_failures); + + if (manager_failures > 1) { + manager_failures = 0; + cop_log(COP_WARNING, "killing manager\n"); + safe_kill(manager_lockfile, manager_binary, true); + } + cop_log_trace("Leaving heartbeat_manager() --> %d\n", err); + return err; + } + + cop_log_trace("Leaving heartbeat_manager() --> %d\n", err); + return err; +} + +static int +heartbeat_server() +{ + int err; + + cop_log_trace("Entering heartbeat_server()\n"); + err = test_server_http_port(); + + if (err < 0) { + // If the test failed, increment the count of the number of + // failures. We don't kill the server the first time the test + // fails because we might just have gotten caught in a race + // where we decided to do the test because we thought the + // server was up even though somebody was in the process of + // bringing it down. The "server_up" function will reset + // 'server_failures' if it determines the server is down. + + server_failures += 1; + cop_log(COP_WARNING, "server heartbeat failed [%d]\n", server_failures); + + // If this is the second time that the server test has failed + // we kill the server. 
+ if (server_failures > 1) { + server_failures = 0; + cop_log(COP_WARNING, "killing server\n"); + + // TSqa02622: Change the ALRM signal handler while + // trying to kill the process since if a core + // is being written, it could take a long time + // Set a new alarm so that we can print warnings + // if it is taking too long to kill the server + // + safe_kill(server_lockfile, server_binary, false); + } + } else { + if (server_failures) + cop_log(COP_WARNING, "server heartbeat succeeded\n"); + server_failures = 0; + } + + cop_log_trace("Leaving heartbeat_server() --> %d\n", err); + return err; +} + +static int +server_up() +{ + static int old_val = 0; + int val = -1; + int err; + + cop_log_trace("Entering server_up()\n"); + if (cluster_type != NO_CLUSTER) { + err = read_manager_int("proxy.node.proxy_running", &val); + } else { + err = read_mgmt_cli_int("proxy.node.proxy_running", &val); + } + + if (err < 0) { + cop_log(COP_WARNING, "could not contact manager, assuming server is down\n"); + cop_log_trace("Leaving server_up() --> 0\n"); + return 0; + } + + if (val != old_val) { + server_failures = 0; + server_not_found = 0; + old_val = val; + } + + if (val == 1) { + cop_log_trace("Leaving server_up() --> 1\n"); + return 1; + } else { + cop_log_trace("Leaving server_up() --> 0\n"); + return 0; + } +} + + +// | state | status | action +// --------|---------|----------|--------------- +// manager | up | ok | nothing +// server | up | ok | +// --------|---------|----------|--------------- +// manager | up | bad | kill manager +// server | up | ? | +// --------|---------|----------|--------------- +// manager | up | ok | kill manager +// server | down | ? | +// --------|---------|----------|--------------- +// manager | up | ok | kill server +// server | up | bad | + + +static void +check_programs() +{ + int err; + pid_t holding_pid; + + cop_log_trace("Entering check_programs()\n"); + + // Try to get the manager lock file. 
If we succeed in doing this, + // it means there is no manager running. + Lockfile manager_lf(manager_lockfile); + err = manager_lf.Open(&holding_pid); + chown_file_to_admin_user(manager_lockfile); + + if (err > 0) { + // 'lockfile_open' returns the file descriptor of the opened + // lockfile. We need to close this before spawning the + // manager so that the manager can grab the lock. + manager_lf.Close(); + +#if !defined(MANAGER_FLAP_DETECTION) + // Make sure we don't have a stray traffic server running. + cop_log(COP_WARNING, "traffic_manager not running, making sure traffic_server is dead\n"); + safe_kill(server_lockfile, server_binary, false); + // Spawn the manager. + cop_log(COP_WARNING, "spawning traffic_manager\n"); + spawn_manager(); +#else + // Make sure we don't have a stray traffic server running. + if (!manager_flapping) { + cop_log(COP_WARNING, "traffic_manager not running, making sure traffic_server is dead\n"); + safe_kill(server_lockfile, server_binary, false); + } + // Spawn the manager (check for flapping manager too) + ink_hrtime now = milliseconds(); + if (!manager_flapping) { + if ((manager_flap_interval_start_time == 0) || + (now - manager_flap_interval_start_time > MANAGER_FLAP_INTERVAL_MSEC) + ) { + // either: + // . it's our first time through + // . we were flapping a while ago, but we would + // like to retry now + // . 
it's been a while since we last tried to start + // traffic_manager + manager_flap_count = 0; + } + if (manager_flap_count >= MANAGER_MAX_FLAP_COUNT) { + // we've flapped too many times, hold off for a while + cop_log(COP_WARNING, "unable to start traffic_manager, retrying in %d second(s)\n", + MANAGER_FLAP_RETRY_MSEC / 1000); + manager_flapping = true; + manager_flap_retry_start_time = now; + } else { + // try to spawn traffic_manager + cop_log(COP_WARNING, "spawning traffic_manager\n"); + spawn_manager(); + // track spawn attempt + if (manager_flap_count == 0) { + manager_flap_interval_start_time = now; + } + manager_flap_count++; + } + } else { + // we were flapping, take some time off and don't call + // spawn_manager + if (now - manager_flap_retry_start_time > MANAGER_FLAP_RETRY_MSEC) { + manager_flapping = false; + manager_flap_interval_start_time = 0; + } + } +#endif + } else { + // If there is a manager running we want to heartbeat it to + // make sure it hasn't wedged. If the manager test succeeds we + // check to see if the server is up. (That is, it hasn't been + // brought down via the UI). If the manager thinks the server + // is up, we make sure there is actually a server process + // running. If there is we test it. 
+ + alarm(2 * manager_timeout); + err = heartbeat_manager(); + alarm(0); + + if (err < 0) { + return; + } + + if (server_up() <= 0) { + return; + } + + Lockfile server_lf(server_lockfile); + err = server_lf.Open(&holding_pid); + + if (err > 0) { + server_lf.Close(); + + server_not_found += 1; + cop_log(COP_WARNING, "cannot find traffic_server [%d]\n", server_not_found); + + if (server_not_found > 1) { + server_not_found = 0; + cop_log(COP_WARNING, "killing manager\n"); + safe_kill(manager_lockfile, manager_binary, true); + } + } else { + alarm(2 * server_timeout); + heartbeat_server(); + alarm(0); + } + } + cop_log_trace("Leaving check_programs()\n"); +} + +static void +check_memory() +{ + // TODO: We need to take care of other systems, ie bsd, solaris. + // And we should try to summarize whether the swapping is really + // putting the server under memory pressure. Or should we check + // the process memory usage of the server & manager? + cop_log_trace("Entering check_memory()\n"); +#if defined(linux) + if (check_memory_min_swapfree_kb > 0 || check_memory_min_memfree_kb > 0) { + FILE *fp; + char buf[LINE_MAX]; + long long memfree, swapfree, swapsize; + memfree = swapfree = swapsize = 0; + if ((fp = fopen("/proc/meminfo", "r"))) { + while (fgets(buf, sizeof buf, fp)) { + if (strncmp(buf, "MemFree:", sizeof "MemFree:" - 1) == 0) + memfree = strtoll(buf + sizeof "MemFree:" - 1, 0, 10); + else if (strncmp(buf, "SwapFree:", sizeof "SwapFree:" - 1) == 0) + swapfree = strtoll(buf + sizeof "SwapFree:" - 1, 0, 10); + else if (strncmp(buf, "SwapTotal:", sizeof "SwapTotal:" - 1) == 0) + swapsize = strtoll(buf + sizeof "SwapTotal:" - 1, 0, 10); + } + fclose(fp); + // simple heuristic for linux + // swapsize swapfree memfree + // 1: >0 low high (bad) + // 2: >0 high low (okay) + // 3: >0 low low (bad; covered by 1) + // 4: 0 0 high (okay) + // 5: 0 0 low (bad) + if ((swapsize != 0 && swapfree < check_memory_min_swapfree_kb) || + (swapsize == 0 && memfree < 
check_memory_min_memfree_kb)) { + cop_log(COP_WARNING, "Low memory available (swap: %dkB, mem: %dkB)\n", (int) swapfree, (int) memfree); + cop_log(COP_WARNING, "Killing '%s' and '%s'\n", manager_binary, server_binary); + manager_failures = 0; + safe_kill(manager_lockfile, manager_binary, true); + server_failures = 0; + safe_kill(server_lockfile, server_binary, false); + } + } else { + cop_log(COP_WARNING, "Unable to open /proc/meminfo: %s\n", strerror(errno)); + } + } +#endif + cop_log_trace("Leaving check_memory()\n"); +} + +static int +check_no_run() +{ + char path[PATH_NAME_MAX * 2]; + struct stat info; + int err; + + cop_log_trace("Entering check_no_run()\n"); + snprintf(path, sizeof(path), "%s/no_cop", runtime_dir); + + do { + err = stat(path, &info); + } while ((err < 0) && (transient_error(errno, TRANSIENT_ERROR_WAIT_MS))); + + if (err < 0) { + cop_log_trace("Leaving check_no_run() --> 0\n"); + return 0; + } + + cop_log(COP_WARNING, "encountered \"%s\" file...exiting\n", path); + cop_log_trace("Leaving check_no_run() --> -1\n"); + return -1; +} + +// Changed function from taking no argument and returning void +// to taking a void* and returning a void*. The change was made +// so that we can call ink_thread_create() on this function +// in the case of running cop as a win32 service. 
+static void* +check(void *arg) +{ + bool mgmt_init = false; + cop_log_trace("Entering check()\n"); + + for (;;) { + // problems with the ownership of this file as root Make sure it is + // owned by the admin user + chown_file_to_admin_user(manager_lockfile); + chown_file_to_admin_user(server_lockfile); + + alarm(2 * (sleep_time + manager_timeout * 2 + server_timeout)); + + if (check_no_run() < 0) { + break; + } + // Log any SIGCLD singals we received + if (child_pid > 0) { + if (WIFEXITED(child_status) == 0) { + // Child terminated abnormally + cop_log(COP_WARNING, + "cop received non-normal child status signal [%d %d]\n", child_pid, WEXITSTATUS(child_status)); + } else { + // normal termination + cop_log(COP_WARNING, "cop received child status signal [%d %d]\n", child_pid, child_status); + } + if (WIFSIGNALED(child_status)) { + int sig = WTERMSIG(child_status); + cop_log(COP_WARNING, "child terminated due to signal %d: %s\n", sig, strsignal(sig)); + } + + child_pid = 0; + child_status = 0; + } + + // Re-read the config file information + config_reload_records(); + + // Check to make sure the programs are running + check_programs(); + + // Check to see if we're running out of free memory + check_memory(); + + // Pause to catch our breath. (10 seconds). + // Use 'millisleep()' because normal 'sleep()' interferes with + // the SIGALRM signal which we use to heartbeat the cop. + millisleep(sleep_time * 1000); + + // We do this after the first round of checks, since the first "check" will spawn traffic_manager + if (!mgmt_init) { + TSInit(Layout::get()->runtimedir, static_cast(TS_MGMT_OPT_NO_EVENTS)); + mgmt_init = true; + } + } + + // Done with the mgmt API. 
+ TSTerminate(); + + cop_log_trace("Leaving check()\n"); + return arg; +} + + +static void +check_lockfile() +{ + int err; + pid_t holding_pid; + + cop_log_trace("Entering check_lockfile()\n"); + Lockfile cop_lf(cop_lockfile); + err = cop_lf.Get(&holding_pid); + if (err < 0) { + cop_log(COP_WARNING, "periodic cop heartbeat couldn't open '%s' (errno %d)\n", cop_lockfile, -err); + exit(1); + } else if (err == 0) { + cop_log(COP_DEBUG, "periodic heartbeat successful, another cop still on duty\n"); + exit(1); + } + + cop_log(COP_NOTICE, "--- Cop Starting [Version: %s] ---\n", appVersionInfo.FullVersionInfoStr); + cop_log_trace("Leaving check_lockfile()\n"); +} + +static void +init_signals() +{ + struct sigaction action; + + cop_log_trace("Entering init_signals()\n"); + // Handle the SIGTERM and SIGINT signal: + // We kill the process group and wait() for all children + action.sa_handler = sig_term; + sigemptyset(&action.sa_mask); + action.sa_flags = 0; + + sigaction(SIGTERM, &action, NULL); + sigaction(SIGINT, &action, NULL); + + // Handle the SIGCHLD signal. We simply reap all children that + // die (which should only be spawned traffic_manager's). + action.sa_handler = sig_child; + sigemptyset(&action.sa_mask); + action.sa_flags = 0; + + sigaction(SIGCHLD, &action, NULL); + + // Handle a bunch of fatal signals. We simply call abort() when + // these signals arrive in order to generate a core. There is some + // difficulty with generating core files when linking with libthread + // under solaris. 
+#if defined(solaris) + action.sa_handler = NULL; + action.sa_sigaction = sig_fatal; +#else + action.sa_handler = sig_fatal; +#endif + sigemptyset(&action.sa_mask); +#if defined(solaris) + action.sa_flags = SA_SIGINFO; +#else + action.sa_flags = 0; +#endif + + sigaction(SIGQUIT, &action, NULL); + sigaction(SIGILL, &action, NULL); + sigaction(SIGFPE, &action, NULL); + sigaction(SIGBUS, &action, NULL); + sigaction(SIGSEGV, &action, NULL); +#if !defined(linux) + sigaction(SIGEMT, &action, NULL); + sigaction(SIGSYS, &action, NULL); +#endif + + // Handle the SIGALRM signal. We use this signal to make sure the + // cop never wedges. It gets reset every time through its loop. If + // the alarm ever expires we treat it as a fatal signal and dump + // core, secure in the knowledge we'll get restarted. + set_alarm_death(); + + action.sa_handler = sig_ignore; + sigemptyset(&action.sa_mask); + action.sa_flags = 0; + + sigaction(SIGPIPE, &action, NULL); + cop_log_trace("Leaving init_signals()\n"); +} + +static void +init_lockfiles() +{ + + cop_log_trace("Entering init_lockfiles()\n"); + Layout::relative_to(cop_lockfile, sizeof(cop_lockfile), runtime_dir, COP_LOCK); + Layout::relative_to(manager_lockfile, sizeof(manager_lockfile), runtime_dir, MANAGER_LOCK); + Layout::relative_to(server_lockfile, sizeof(server_lockfile), runtime_dir, SERVER_LOCK); + + cop_log_trace("Leaving init_lockfiles()\n"); +} + +inline static void +init_syslog() +{ + openlog("traffic_cop", LOG_PID | LOG_NDELAY | LOG_NOWAIT, LOG_DAEMON); +} + +static void +init_config_file() +{ + struct stat info; + const char * config_dir; + + cop_log_trace("Entering init_config_file()\n"); + + config_dir = Layout::get()->sysconfdir; + if (stat(config_dir, &info) < 0) { + cop_log(COP_FATAL, "unable to locate config directory '%s'\n",config_dir); + cop_log(COP_FATAL, " please try setting correct root path in env variable TS_ROOT \n"); + exit(1); + } + + Layout::relative_to(config_file, sizeof(config_file), config_dir, 
"records.config.shadow"); + if (stat(config_file, &info) < 0) { + Layout::relative_to(config_file, sizeof(config_file), config_dir, "records.config"); + if (stat(config_file, &info) < 0) { + cop_log(COP_FATAL, "unable to locate \"%s/records.config\" or \"%s/records.config.shadow\"\n", config_dir, config_dir); + exit(1); + } + } + cop_log_trace("Leaving init_config_file()\n"); +} + +static void +init() +{ + struct stat info; + + cop_log_trace("Entering init()\n"); + + RecConfigFileInit(); + + init_signals(); + init_syslog(); + + init_config_file(); + config_reload_records(); + + runtime_dir = config_read_runtime_dir(); + if (stat(runtime_dir, &info) < 0) { + cop_log(COP_FATAL, "unable to locate local state directory '%s'\n",runtime_dir); + cop_log(COP_FATAL, " please try setting correct root path in either env variable TS_ROOT \n"); + exit(1); + } + + init_lockfiles(); + check_lockfile(); + + cop_log_trace("Leaving init()\n"); +} + +static const ArgumentDescription argument_descriptions[] = { + { "debug", 'd', "Enable debug logging", "F", &debug_flag, NULL, NULL }, + { "stdout", 'o', "Print log messages to standard output", "F", &stdout_flag, NULL, NULL }, + { "stop", 's', "Send child processes SIGSTOP instead of SIGKILL", "F", &stop_flag, NULL, NULL }, + { "version", 'V', "Print Version String", "T", &version_flag, NULL, NULL}, +}; + +int +main(int /* argc */, char *argv[]) +{ + int fd; + appVersionInfo.setup(PACKAGE_NAME,"traffic_cop", PACKAGE_VERSION, __DATE__, __TIME__, BUILD_MACHINE, BUILD_PERSON, ""); + + // Before accessing file system initialize Layout engine + Layout::create(); + + process_args(argument_descriptions, countof(argument_descriptions), argv); + + // Check for version number request + if (version_flag) { + fprintf(stderr, "%s\n", appVersionInfo.FullVersionInfoStr); + exit(0); + } + + if (stop_flag) { + cop_log_trace("Cool! 
I think I'll be a STOP cop!"); + killsig = SIGSTOP; + } + + signal(SIGHUP, SIG_IGN); + signal(SIGTSTP, SIG_IGN); + signal(SIGTTOU, SIG_IGN); + signal(SIGTTIN, SIG_IGN); + + // setup supplementary groups if it is not set. any way, worth a try. + if (0 == getgroups(0, NULL)) { + uid_t uid = getuid(); + gid_t gid = getgid(); + + const int bufSize = 1024; + char buf[bufSize]; + + struct passwd passwdInfo; + struct passwd *ppasswd = NULL; + int res; + res = getpwuid_r(uid, &passwdInfo, buf, bufSize, &ppasswd); + if (!res && ppasswd) { + initgroups(ppasswd->pw_name,gid); + } + } + + setsid(); // Important, thanks Vlad. :) +#if (defined(freebsd) && !defined(kfreebsd)) || defined(openbsd) + setpgrp(0,0); +#else + setpgrp(); +#endif + + // Detach STDIN, STDOUT, and STDERR (basically, "nohup"). /leif + if (!stdout_flag) { + close(STDIN_FILENO); + close(STDOUT_FILENO); + close(STDERR_FILENO); + if ((fd = open("/dev/null", O_WRONLY, 0)) >= 0) { + fcntl(fd, F_DUPFD, STDIN_FILENO); + fcntl(fd, F_DUPFD, STDOUT_FILENO); + fcntl(fd, F_DUPFD, STDERR_FILENO); + close(fd); + } else { + ink_fputln(stderr, "Unable to open /dev/null"); + return 0; + } + } + + // Initialize and start it up. + init(); + check(NULL); + + return 0; +} + diff --git a/cmd/traffic_line/Makefile.am b/cmd/traffic_line/Makefile.am new file mode 100644 index 00000000..594a874a --- /dev/null +++ b/cmd/traffic_line/Makefile.am @@ -0,0 +1,36 @@ +# +# Makefile.am for the Enterprise Management module. +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +AM_CPPFLAGS = \ + $(iocore_include_dirs) \ + -I$(top_srcdir)/lib \ + -I$(top_srcdir)/lib/ts \ + -I$(top_srcdir)/mgmt/api/include + +bin_PROGRAMS = traffic_line + +traffic_line_SOURCES = \ + traffic_line.cc + +traffic_line_LDFLAGS = @EXTRA_CXX_LDFLAGS@ @LIBTOOL_LINK_FLAGS@ +traffic_line_LDADD = \ + $(top_builddir)/mgmt/api/libtsmgmtshare.la \ + $(top_builddir)/mgmt/api/libtsmgmt.la \ + $(top_builddir)/lib/ts/libtsutil.la \ + @LIBRESOLV@ @LIBTCL@ @OPENSSL_LIBS@ diff --git a/cmd/traffic_line/Makefile.in b/cmd/traffic_line/Makefile.in new file mode 100644 index 00000000..a885a1d8 --- /dev/null +++ b/cmd/traffic_line/Makefile.in @@ -0,0 +1,902 @@ +# Makefile.in generated by automake 1.13.4 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# +# Makefile.am for the Enterprise Management module. +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) ;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgincludedir = $(includedir)/@PACKAGE@ 
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +bin_PROGRAMS = traffic_line$(EXEEXT) +subdir = cmd/traffic_line +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ + $(top_srcdir)/build/aux/depcomp +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/ax_boost_base.m4 \ + $(top_srcdir)/build/ax_check_openssl.m4 \ + $(top_srcdir)/build/ax_compare_version.m4 \ + $(top_srcdir)/build/ax_compiler_vendor.m4 \ + $(top_srcdir)/build/ax_cxx_compile_stdcxx_11.m4 \ + $(top_srcdir)/build/ax_lib_curl.m4 \ + $(top_srcdir)/build/ax_lib_readline.m4 \ + $(top_srcdir)/build/ax_path_generic.m4 \ + $(top_srcdir)/build/ax_perl_modules.m4 \ + $(top_srcdir)/build/ax_with_curses.m4 \ + $(top_srcdir)/build/common.m4 $(top_srcdir)/build/crypto.m4 \ + $(top_srcdir)/build/jemalloc.m4 $(top_srcdir)/build/libtool.m4 \ + $(top_srcdir)/build/ltoptions.m4 \ + $(top_srcdir)/build/ltsugar.m4 \ + $(top_srcdir)/build/ltversion.m4 \ + $(top_srcdir)/build/lt~obsolete.m4 $(top_srcdir)/build/lzma.m4 \ + $(top_srcdir)/build/network.m4 $(top_srcdir)/build/pcre.m4 \ + $(top_srcdir)/build/pkg.m4 $(top_srcdir)/build/tcl.m4 \ + $(top_srcdir)/build/tcmalloc.m4 $(top_srcdir)/build/xml.m4 \ + $(top_srcdir)/build/zlib.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/lib/ts/ink_autoconf.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +am__installdirs = "$(DESTDIR)$(bindir)" +PROGRAMS = $(bin_PROGRAMS) +am_traffic_line_OBJECTS = traffic_line.$(OBJEXT) +traffic_line_OBJECTS = 
$(am_traffic_line_OBJECTS) +traffic_line_DEPENDENCIES = \ + $(top_builddir)/mgmt/api/libtsmgmtshare.la \ + $(top_builddir)/mgmt/api/libtsmgmt.la \ + $(top_builddir)/lib/ts/libtsutil.la +AM_V_lt = $(am__v_lt_@AM_V@) +am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@) +am__v_lt_0 = --silent +am__v_lt_1 = +traffic_line_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ + $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \ + $(CXXFLAGS) $(traffic_line_LDFLAGS) $(LDFLAGS) -o $@ +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir)/lib/ts +depcomp = $(SHELL) $(top_srcdir)/build/aux/depcomp +am__depfiles_maybe = depfiles +am__mv = mv -f +CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ + $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) +LTCXXCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ + $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) \ + $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ + $(AM_CXXFLAGS) $(CXXFLAGS) +AM_V_CXX = $(am__v_CXX_@AM_V@) +am__v_CXX_ = $(am__v_CXX_@AM_DEFAULT_V@) +am__v_CXX_0 = @echo " CXX " $@; +am__v_CXX_1 = +CXXLD = $(CXX) +CXXLINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ + $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \ + $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ +AM_V_CXXLD = $(am__v_CXXLD_@AM_V@) +am__v_CXXLD_ = $(am__v_CXXLD_@AM_DEFAULT_V@) +am__v_CXXLD_0 = @echo " CXXLD " $@; +am__v_CXXLD_1 = +SOURCES = $(traffic_line_SOURCES) +DIST_SOURCES = $(traffic_line_SOURCES) +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) 
$(LISP) +# Read a list of newline-separated strings from the standard input, +# and print each of them once, without duplicates. Input order is +# *not* preserved. +am__uniquify_input = $(AWK) '\ + BEGIN { nonempty = 0; } \ + { items[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in items) print i; }; } \ +' +# Make sure the list of sources is unique. This is necessary because, +# e.g., the same source file might be shared among _SOURCES variables +# for different programs/libraries. +am__define_uniq_tagged_files = \ + list='$(am__tagged_files)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | $(am__uniquify_input)` +ETAGS = etags +CTAGS = ctags +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +pkgdatadir = @pkgdatadir@ +pkglibdir = @pkglibdir@ +pkglibexecdir = @pkglibexecdir@ +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_CFLAGS = @AM_CFLAGS@ +AM_CXXFLAGS = @AM_CXXFLAGS@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +API_DEFS = @API_DEFS@ +AR = @AR@ +ASCPP = @ASCPP@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +BOOST_CPPFLAGS = @BOOST_CPPFLAGS@ +BOOST_LDFLAGS = @BOOST_LDFLAGS@ +CC = @CC@ +CCACHE = @CCACHE@ +CCAS = @CCAS@ +CCASDEPMODE = @CCASDEPMODE@ +CCASFLAGS = @CCASFLAGS@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CURL_CFLAGS = @CURL_CFLAGS@ +CURL_CONFIG = @CURL_CONFIG@ +CURL_LIBS = @CURL_LIBS@ +CURSES_LIB = @CURSES_LIB@ +CXX = @CXX@ +CXXCPP = @CXXCPP@ +CXXDEPMODE = @CXXDEPMODE@ +CXXFLAGS = @CXXFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +DLLTOOL = @DLLTOOL@ +DOXYGEN = @DOXYGEN@ +DSYMUTIL = @DSYMUTIL@ +DUMPBIN = @DUMPBIN@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +EXPAT_LDFLAGS = @EXPAT_LDFLAGS@ +EXTRA_CC_LDFLAGS = @EXTRA_CC_LDFLAGS@ +EXTRA_CXX_LDFLAGS = @EXTRA_CXX_LDFLAGS@ +FGREP = @FGREP@ +FLEX_CFLAGS = @FLEX_CFLAGS@ 
+GEOIP_LIBS = @GEOIP_LIBS@ +GREP = @GREP@ +HOST_GUESS = @HOST_GUESS@ +HWLOC_CFLAGS = @HWLOC_CFLAGS@ +HWLOC_LIBS = @HWLOC_LIBS@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +LD = @LD@ +LDFLAGS = @LDFLAGS@ +LEX = @LEX@ +LEXLIB = @LEXLIB@ +LEX_OUTPUT_ROOT = @LEX_OUTPUT_ROOT@ +LIBCAP = @LIBCAP@ +LIBCRYPT = @LIBCRYPT@ +LIBDEMANGLE = @LIBDEMANGLE@ +LIBEXPAT = @LIBEXPAT@ +LIBLZMA = @LIBLZMA@ +LIBOBJS = @LIBOBJS@ +LIBPCRE = @LIBPCRE@ +LIBPROFILER = @LIBPROFILER@ +LIBREADLINE = @LIBREADLINE@ +LIBRESOLV = @LIBRESOLV@ +LIBS = @LIBS@ +LIBTCL = @LIBTCL@ +LIBTOOL = @LIBTOOL@ +LIBTOOL_LINK_FLAGS = @LIBTOOL_LINK_FLAGS@ +LIBZ = @LIBZ@ +LIPO = @LIPO@ +LN_S = @LN_S@ +LTLIBOBJS = @LTLIBOBJS@ +LUA_LDFLAGS = @LUA_LDFLAGS@ +MAKEINFO = @MAKEINFO@ +MANIFEST_TOOL = @MANIFEST_TOOL@ +MGMT_DEFS = @MGMT_DEFS@ +MKDIR_P = @MKDIR_P@ +NM = @NM@ +NMEDIT = @NMEDIT@ +OBJDUMP = @OBJDUMP@ +OBJEXT = @OBJEXT@ +OPENSSL_INCLUDES = @OPENSSL_INCLUDES@ +OPENSSL_LDFLAGS = @OPENSSL_LDFLAGS@ +OPENSSL_LIBS = @OPENSSL_LIBS@ +OTOOL = @OTOOL@ +OTOOL64 = @OTOOL64@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PCRE_CONFIG = @PCRE_CONFIG@ +PERL = @PERL@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PYTHON = @PYTHON@ +PYTHON_EXEC_PREFIX = @PYTHON_EXEC_PREFIX@ +PYTHON_PLATFORM = @PYTHON_PLATFORM@ +PYTHON_PREFIX = @PYTHON_PREFIX@ +PYTHON_VERSION = @PYTHON_VERSION@ +RANLIB = @RANLIB@ +RM = @RM@ +SED = @SED@ +SET_MAKE = @SET_MAKE@ +SHARED_CFLAGS = @SHARED_CFLAGS@ +SHARED_CXXFLAGS = @SHARED_CXXFLAGS@ +SHARED_CXXLINKFLAGS = @SHARED_CXXLINKFLAGS@ +SHARED_LDFLAGS = @SHARED_LDFLAGS@ +SHELL = @SHELL@ +SPDYLAY_CFLAGS = 
@SPDYLAY_CFLAGS@ +SPDYLAY_LIBS = @SPDYLAY_LIBS@ +SPHINXBUILD = @SPHINXBUILD@ +SPHINXOPTS = @SPHINXOPTS@ +STRIP = @STRIP@ +TCL_BIN_DIR = @TCL_BIN_DIR@ +TCL_LIB_FILE = @TCL_LIB_FILE@ +TCL_LIB_FLAG = @TCL_LIB_FLAG@ +TCL_LIB_SPEC = @TCL_LIB_SPEC@ +TCL_PATCH_LEVEL = @TCL_PATCH_LEVEL@ +TCL_SRC_DIR = @TCL_SRC_DIR@ +TCL_STUB_LIB_FILE = @TCL_STUB_LIB_FILE@ +TCL_STUB_LIB_FLAG = @TCL_STUB_LIB_FLAG@ +TCL_STUB_LIB_SPEC = @TCL_STUB_LIB_SPEC@ +TCL_VERSION = @TCL_VERSION@ +TFW_PACKAGE_SUFFIX = @TFW_PACKAGE_SUFFIX@ +TS_LIBTOOL_VERSION = @TS_LIBTOOL_VERSION@ +TS_LUAJIT_LDFLAGS = @TS_LUAJIT_LDFLAGS@ +TS_MAN1_MANPAGES = @TS_MAN1_MANPAGES@ +TS_MAN3_MANPAGES = @TS_MAN3_MANPAGES@ +TS_MAN5_MANPAGES = @TS_MAN5_MANPAGES@ +TS_MAN8_MANPAGES = @TS_MAN8_MANPAGES@ +TS_VERSION_MAJOR = @TS_VERSION_MAJOR@ +TS_VERSION_MICRO = @TS_VERSION_MICRO@ +TS_VERSION_MINOR = @TS_VERSION_MINOR@ +TS_VERSION_NUMBER = @TS_VERSION_NUMBER@ +TS_VERSION_STRING = @TS_VERSION_STRING@ +VERSION = @VERSION@ +YACC = @YACC@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_AR = @ac_ct_AR@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_CXX = @ac_ct_CXX@ +ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ +ac_cv_sizeof_voidp = @ac_cv_sizeof_voidp@ +allocah = @allocah@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +arpa_ineth = @arpa_ineth@ +arpa_nameser_compath = @arpa_nameser_compath@ +arpa_nameserh = @arpa_nameserh@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_group = @build_group@ +build_machine = @build_machine@ +build_os = @build_os@ +build_person = @build_person@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +cpioh = @cpioh@ +ctypeh = @ctypeh@ +datadir = @datadir@ +datarootdir = @datarootdir@ +default_loopback_iface = @default_loopback_iface@ +defer_accept = @defer_accept@ +docdir = @docdir@ 
+dvidir = @dvidir@ +enable_remote_cov_commit = @enable_remote_cov_commit@ +endianh = @endianh@ +exec_prefix = @exec_prefix@ +execinfoh = @execinfoh@ +exp_bindir = @exp_bindir@ +exp_cachedir = @exp_cachedir@ +exp_datadir = @exp_datadir@ +exp_docdir = @exp_docdir@ +exp_exec_prefix = @exp_exec_prefix@ +exp_includedir = @exp_includedir@ +exp_infodir = @exp_infodir@ +exp_installbuilddir = @exp_installbuilddir@ +exp_libdir = @exp_libdir@ +exp_libexecdir = @exp_libexecdir@ +exp_localstatedir = @exp_localstatedir@ +exp_logdir = @exp_logdir@ +exp_mandir = @exp_mandir@ +exp_prefix = @exp_prefix@ +exp_runtimedir = @exp_runtimedir@ +exp_sbindir = @exp_sbindir@ +exp_sysconfdir = @exp_sysconfdir@ +expath = @expath@ +floath = @floath@ +gethostbyname_r_glibc2 = @gethostbyname_r_glibc2@ +has_128bit_cas = @has_128bit_cas@ +has_backtrace = @has_backtrace@ +has_in6_is_addr_unspecified = @has_in6_is_addr_unspecified@ +has_interim_cache = @has_interim_cache@ +has_ip_tos = @has_ip_tos@ +has_profiler = @has_profiler@ +has_so_mark = @has_so_mark@ +has_spdy = @has_spdy@ +has_tcmalloc = @has_tcmalloc@ +has_tests = @has_tests@ +has_wccp = @has_wccp@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +ifaddrsh = @ifaddrsh@ +includedir = @includedir@ +infodir = @infodir@ +ink_with_modules_local = @ink_with_modules_local@ +ink_with_modules_process = @ink_with_modules_process@ +install_sh = @install_sh@ +installbuilddir = @installbuilddir@ +iocore_include_dirs = @iocore_include_dirs@ +ip_transparent = @ip_transparent@ +jemalloch = @jemalloch@ +libdir = @libdir@ +libexecdir = @libexecdir@ +libgenh = @libgenh@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +lzmah = @lzmah@ +machine_endianh = @machine_endianh@ +malloch = @malloch@ +mandir = @mandir@ +mathh = @mathh@ +max_api_stats = @max_api_stats@ +max_event_threads = @max_event_threads@ +max_host_name_len = @max_host_name_len@ 
+max_threads_per_type = @max_threads_per_type@ +mkdir_p = @mkdir_p@ +net_ppp_defsh = @net_ppp_defsh@ +netdbh = @netdbh@ +netinet_in_systmh = @netinet_in_systmh@ +netinet_inh = @netinet_inh@ +netinet_ip_icmph = @netinet_ip_icmph@ +netinet_iph = @netinet_iph@ +netinet_tcph = @netinet_tcph@ +oldincludedir = @oldincludedir@ +pcre_pcreh = @pcre_pcreh@ +pcreh = @pcreh@ +pdfdir = @pdfdir@ +pkgbindir = @pkgbindir@ +pkgcachedir = @pkgcachedir@ +pkgdocdir = @pkgdocdir@ +pkglocalstatedir = @pkglocalstatedir@ +pkglogdir = @pkglogdir@ +pkgpyexecdir = @pkgpyexecdir@ +pkgpythondir = @pkgpythondir@ +pkgruntimedir = @pkgruntimedir@ +pkgsbindir = @pkgsbindir@ +pkgsysconfdir = @pkgsysconfdir@ +pkgsysgroup = @pkgsysgroup@ +pkgsysuser = @pkgsysuser@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +pthread_nph = @pthread_nph@ +pyexecdir = @pyexecdir@ +pythondir = @pythondir@ +readline_readlineh = @readline_readlineh@ +rel_bindir = @rel_bindir@ +rel_cachedir = @rel_cachedir@ +rel_datadir = @rel_datadir@ +rel_docdir = @rel_docdir@ +rel_exec_prefix = @rel_exec_prefix@ +rel_includedir = @rel_includedir@ +rel_infodir = @rel_infodir@ +rel_installbuilddir = @rel_installbuilddir@ +rel_libdir = @rel_libdir@ +rel_libexecdir = @rel_libexecdir@ +rel_localstatedir = @rel_localstatedir@ +rel_logdir = @rel_logdir@ +rel_mandir = @rel_mandir@ +rel_prefix = @rel_prefix@ +rel_runtimedir = @rel_runtimedir@ +rel_sbindir = @rel_sbindir@ +rel_sysconfdir = @rel_sysconfdir@ +runtimedir = @runtimedir@ +sbindir = @sbindir@ +schedh = @schedh@ +sharedstatedir = @sharedstatedir@ +siginfoh = @siginfoh@ +srcdir = @srcdir@ +stroptsh = @stroptsh@ +sys_byteorderh = @sys_byteorderh@ +sys_cpuseth = @sys_cpuseth@ +sys_epollh = @sys_epollh@ +sys_eventh = @sys_eventh@ +sys_ioctlh = @sys_ioctlh@ +sys_mounth = @sys_mounth@ +sys_paramh = @sys_paramh@ +sys_pseth = @sys_pseth@ +sys_sockioh = @sys_sockioh@ +sys_sysctlh = @sys_sysctlh@ +sys_sysinfoh = @sys_sysinfoh@ +sys_sysmacrosh = 
@sys_sysmacrosh@ +sys_systeminfoh = @sys_systeminfoh@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +use_diags = @use_diags@ +use_epoll = @use_epoll@ +use_fast_sdk = @use_fast_sdk@ +use_freelist = @use_freelist@ +use_hwloc = @use_hwloc@ +use_kqueue = @use_kqueue@ +use_linux_native_aio = @use_linux_native_aio@ +use_port = @use_port@ +use_posix_cap = @use_posix_cap@ +use_reclaimable_freelist = @use_reclaimable_freelist@ +use_tls_alpn = @use_tls_alpn@ +use_tls_eckey = @use_tls_eckey@ +use_tls_npn = @use_tls_npn@ +use_tls_sni = @use_tls_sni@ +use_tproxy = @use_tproxy@ +valuesh = @valuesh@ +waith = @waith@ +zlibh = @zlibh@ +AM_CPPFLAGS = \ + $(iocore_include_dirs) \ + -I$(top_srcdir)/lib \ + -I$(top_srcdir)/lib/ts \ + -I$(top_srcdir)/mgmt/api/include + +traffic_line_SOURCES = \ + traffic_line.cc + +traffic_line_LDFLAGS = @EXTRA_CXX_LDFLAGS@ @LIBTOOL_LINK_FLAGS@ +traffic_line_LDADD = \ + $(top_builddir)/mgmt/api/libtsmgmtshare.la \ + $(top_builddir)/mgmt/api/libtsmgmt.la \ + $(top_builddir)/lib/ts/libtsutil.la \ + @LIBRESOLV@ @LIBTCL@ @OPENSSL_LIBS@ + +all: all-am + +.SUFFIXES: +.SUFFIXES: .cc .lo .o .obj +$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign cmd/traffic_line/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --foreign cmd/traffic_line/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +install-binPROGRAMS: $(bin_PROGRAMS) + @$(NORMAL_INSTALL) + @list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(bindir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(bindir)" || exit 1; \ + fi; \ + for p in $$list; do echo "$$p $$p"; done | \ + sed 's/$(EXEEXT)$$//' | \ + while read p p1; do if test -f $$p \ + || test -f $$p1 \ + ; then echo "$$p"; echo "$$p"; else :; fi; \ + done | \ + sed -e 'p;s,.*/,,;n;h' \ + -e 's|.*|.|' \ + -e 'p;x;s,.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/' | \ + sed 'N;N;N;s,\n, ,g' | \ + $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1 } \ + { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ + if ($$2 == $$4) files[d] = files[d] " " $$1; \ + else { print "f", $$3 "/" $$4, $$1; } } \ + END { for (d in files) print "f", d, files[d] }' | \ + while read type dir files; do \ + if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ + test -z "$$files" || { \ + echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files '$(DESTDIR)$(bindir)$$dir'"; \ + $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files "$(DESTDIR)$(bindir)$$dir" || exit $$?; \ + } \ + ; done + +uninstall-binPROGRAMS: + 
@$(NORMAL_UNINSTALL) + @list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \ + files=`for p in $$list; do echo "$$p"; done | \ + sed -e 'h;s,^.*/,,;s/$(EXEEXT)$$//;$(transform)' \ + -e 's/$$/$(EXEEXT)/' \ + `; \ + test -n "$$list" || exit 0; \ + echo " ( cd '$(DESTDIR)$(bindir)' && rm -f" $$files ")"; \ + cd "$(DESTDIR)$(bindir)" && rm -f $$files + +clean-binPROGRAMS: + @list='$(bin_PROGRAMS)'; test -n "$$list" || exit 0; \ + echo " rm -f" $$list; \ + rm -f $$list || exit $$?; \ + test -n "$(EXEEXT)" || exit 0; \ + list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ + echo " rm -f" $$list; \ + rm -f $$list + +traffic_line$(EXEEXT): $(traffic_line_OBJECTS) $(traffic_line_DEPENDENCIES) $(EXTRA_traffic_line_DEPENDENCIES) + @rm -f traffic_line$(EXEEXT) + $(AM_V_CXXLD)$(traffic_line_LINK) $(traffic_line_OBJECTS) $(traffic_line_LDADD) $(LIBS) + +mostlyclean-compile: + -rm -f *.$(OBJEXT) + +distclean-compile: + -rm -f *.tab.c + +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/traffic_line.Po@am__quote@ + +.cc.o: +@am__fastdepCXX_TRUE@ $(AM_V_CXX)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.o$$||'`;\ +@am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\ +@am__fastdepCXX_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Po +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ $< + +.cc.obj: +@am__fastdepCXX_TRUE@ $(AM_V_CXX)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.obj$$||'`;\ +@am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ `$(CYGPATH_W) '$<'` &&\ +@am__fastdepCXX_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Po +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) 
@AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` + +.cc.lo: +@am__fastdepCXX_TRUE@ $(AM_V_CXX)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.lo$$||'`;\ +@am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\ +@am__fastdepCXX_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Plo +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LTCXXCOMPILE) -c -o $@ $< + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs + +ID: $(am__tagged_files) + $(am__define_uniq_tagged_files); mkid -fID $$unique +tags: tags-am +TAGS: tags + +tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + set x; \ + here=`pwd`; \ + $(am__define_uniq_tagged_files); \ + shift; \ + if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + if test $$# -gt 0; then \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + "$$@" $$unique; \ + else \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$unique; \ + fi; \ + fi +ctags: ctags-am + +CTAGS: ctags +ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + $(am__define_uniq_tagged_files); \ + test -z "$(CTAGS_ARGS)$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && $(am__cd) $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) "$$here" +cscopelist: cscopelist-am + +cscopelist-am: $(am__tagged_files) + list='$(am__tagged_files)'; \ + case "$(srcdir)" in \ + [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ + *) sdir=$(subdir)/$(srcdir) ;; \ + esac; \ + for i in $$list; do \ + if test -f "$$i"; then \ + echo "$(subdir)/$$i"; \ + else \ + echo "$$sdir/$$i"; \ + fi; \ + done >> $(top_builddir)/cscope.files + +distclean-tags: + -rm -f TAGS ID 
GTAGS GRTAGS GSYMS GPATH tags + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(PROGRAMS) +installdirs: + for dir in "$(DESTDIR)$(bindir)"; do \ + test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + done +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + 
fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." +clean: clean-am + +clean-am: clean-binPROGRAMS clean-generic clean-libtool mostlyclean-am + +distclean: distclean-am + -rm -rf ./$(DEPDIR) + -rm -f Makefile +distclean-am: clean-am distclean-compile distclean-generic \ + distclean-tags + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: install-binPROGRAMS + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -rf ./$(DEPDIR) + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-compile mostlyclean-generic \ + mostlyclean-libtool + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: uninstall-binPROGRAMS + +.MAKE: install-am install-strip + +.PHONY: CTAGS GTAGS TAGS all all-am check check-am clean \ + clean-binPROGRAMS clean-generic clean-libtool cscopelist-am \ + ctags ctags-am distclean distclean-compile distclean-generic \ + distclean-libtool distclean-tags distdir dvi dvi-am html \ + html-am info info-am install install-am install-binPROGRAMS \ + install-data install-data-am install-dvi install-dvi-am \ + install-exec install-exec-am install-html install-html-am \ + install-info install-info-am install-man install-pdf \ + install-pdf-am install-ps install-ps-am install-strip \ + 
installcheck installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-compile \ + mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ + tags tags-am uninstall uninstall-am uninstall-binPROGRAMS + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/cmd/traffic_line/traffic_line.cc b/cmd/traffic_line/traffic_line.cc new file mode 100644 index 00000000..59391515 --- /dev/null +++ b/cmd/traffic_line/traffic_line.cc @@ -0,0 +1,376 @@ +/** @file + + A brief file description + + @section license License + + Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ */ + +#include "libts.h" + +#include "ink_args.h" +#include "I_Version.h" +#include "Tokenizer.h" +#include "TextBuffer.h" +#include "mgmtapi.h" + +static const char *programName; + +static char ReadVar[1024]; +static char MatchVar[1024]; +static char SetVar[1024]; +static char VarValue[1024]; +static int ReRead; +static int Shutdown; +static int BounceCluster; +static int BounceLocal; +static int QueryDeadhosts; +static int Startup; +static int ShutdownMgmtCluster; +static int ShutdownMgmtLocal; +static int ClearCluster; +static int ClearNode; +static char ZeroCluster[1024]; +static char ZeroNode[1024]; +static char StorageCmdOffline[1024]; +static int ShowAlarms; +static int ShowStatus; +static char ClearAlarms[1024]; +static int VersionFlag; + +static TSMgmtError +handleArgInvocation() +{ + if (ReRead == 1) { + return TSReconfigure(); + } else if (ShutdownMgmtCluster == 1) { + return TSRestart(true); + } else if (ShutdownMgmtLocal == 1) { + return TSRestart(false); + } else if (Shutdown == 1) { + return TSProxyStateSet(TS_PROXY_OFF, TS_CACHE_CLEAR_OFF); + } else if (BounceCluster == 1) { + return TSBounce(true); + } else if (BounceLocal == 1) { + return TSBounce(false); + } else if (Startup == 1) { + return TSProxyStateSet(TS_PROXY_ON, TS_CACHE_CLEAR_OFF); + } else if (ClearCluster == 1) { + return TSStatsReset(true, NULL); + } else if (ClearNode == 1) { + return TSStatsReset(false, NULL); + } else if (*ZeroNode != '\0' || *ZeroCluster != '\0') { + TSMgmtError err; + TSRecordEle *rec_ele = TSRecordEleCreate(); + char *name = *ZeroNode ? ZeroNode : ZeroCluster; + + if ((err = TSRecordGet(name, rec_ele)) != TS_ERR_OKAY) { + fprintf(stderr, "%s: %s\n", programName, TSGetErrorMessage(err)); + TSRecordEleDestroy(rec_ele); + return err; + } + TSRecordEleDestroy(rec_ele); + return TSStatsReset(*ZeroCluster ? 
true : false, name); + } else if (QueryDeadhosts == 1) { + fprintf(stderr, "Query Deadhosts is not implemented, it requires support for congestion control.\n"); + fprintf(stderr, "For more details, examine the old code in cli/CLI.cc: QueryDeadhosts()\n"); + return TS_ERR_FAIL; + } else if (*StorageCmdOffline) { + return TSStorageDeviceCmdOffline(StorageCmdOffline); + } else if (ShowAlarms == 1) { + // Show all active alarms, this was moved from the old traffic_shell implementation (show:alarms). + TSList events = TSListCreate(); + + if (TS_ERR_OKAY != TSActiveEventGetMlt(events)) { + TSListDestroy(events); + fprintf(stderr, "Error Retrieving Alarm List\n"); + return TS_ERR_FAIL; + } + + int count = TSListLen(events); + + if (count > 0) { + printf("Active Alarms\n"); + for (int i = 0; i < count; i++) { + char* name = static_cast(TSListDequeue(events)); + printf(" %d. %s\n", i + 1, name); + } + } else { + printf("\nNo active alarms.\n"); + } + TSListDestroy(events); + return TS_ERR_OKAY; + } else if (*ClearAlarms != '\0') { + // Clear (some) active alarms, this was moved from the old traffic_shell implementation (config:alarm) + TSList events = TSListCreate(); + size_t len = strlen(ClearAlarms); + + if (TS_ERR_OKAY != TSActiveEventGetMlt(events)) { + TSListDestroy(events); + fprintf(stderr, "Error Retrieving Alarm List\n"); + return TS_ERR_FAIL; + } + + int count = TSListLen(events); + + if (count == 0) { + printf("No Alarms to resolve\n"); + TSListDestroy(events); + return TS_ERR_OKAY; + } + + int errors = 0; + bool all = false; + int num = -1; + + if ((3 == len) && (0 == strncasecmp(ClearAlarms, "all", len))) { + all = true; + } else { + num = strtol(ClearAlarms, NULL, 10) - 1; + if (num <= 0) + num = -1; + } + + for (int i = 0; i < count; i++) { + char* name = static_cast(TSListDequeue(events)); + + if (all || ((num > -1) && (num == i)) || ((strlen(name) == len) && (0 == strncasecmp(ClearAlarms, name, len)))) { + if (TS_ERR_OKAY != TSEventResolve(name)) { + 
fprintf(stderr, "Errur: Unable to resolve alarm %s\n", name); + ++errors; + } + if (num > 0) // If a specific event number was specified, we can stop now + break; + } + } + TSListDestroy(events); + return (errors > 0 ? TS_ERR_FAIL: TS_ERR_OKAY); + } else if (ShowStatus == 1) { + switch (TSProxyStateGet()) { + case TS_PROXY_ON: + printf("Proxy -- on\n"); + break; + case TS_PROXY_OFF: + printf("Proxy -- off\n"); + break; + case TS_PROXY_UNDEFINED: + printf("Proxy status undefined\n"); + break; + } + return TS_ERR_OKAY; + } else if (*ReadVar != '\0') { // Handle a value read + if (*SetVar != '\0' || *VarValue != '\0') { + fprintf(stderr, "%s: Invalid Argument Combination: Can not read and set values at the same time\n", programName); + return TS_ERR_FAIL; + } else { + TSMgmtError err; + TSRecordEle *rec_ele = TSRecordEleCreate(); + + if ((err = TSRecordGet(ReadVar, rec_ele)) != TS_ERR_OKAY) { + fprintf(stderr, "%s: %s\n", programName, TSGetErrorMessage(err)); + } else { + switch (rec_ele->rec_type) { + case TS_REC_INT: + printf("%" PRId64 "\n", rec_ele->valueT.int_val); + break; + case TS_REC_COUNTER: + printf("%" PRId64 "\n", rec_ele->valueT.counter_val); + break; + case TS_REC_FLOAT: + printf("%f\n", rec_ele->valueT.float_val); + break; + case TS_REC_STRING: + printf("%s\n", rec_ele->valueT.string_val); + break; + default: + fprintf(stderr, "%s: unknown record type (%d)\n", programName, rec_ele->rec_type); + err = TS_ERR_FAIL; + break; + } + } + TSRecordEleDestroy(rec_ele); + return err; + } + } else if (*MatchVar != '\0') { // Handle a value read + if (*SetVar != '\0' || *VarValue != '\0') { + fprintf(stderr, "%s: Invalid Argument Combination: Can not read and set values at the same time\n", programName); + return TS_ERR_FAIL; + } else { + TSMgmtError err; + TSList list = TSListCreate(); + + if ((err = TSRecordGetMatchMlt(MatchVar, list)) != TS_ERR_OKAY) { + fprintf(stderr, "%s: %s\n", programName, TSGetErrorMessage(err)); + } + + // If the RPC call failed, the 
list will be empty, so we won't print anything. Otherwise, + // print all the results, freeing them as we go. + for (TSRecordEle * rec_ele = (TSRecordEle *) TSListDequeue(list); rec_ele; + rec_ele = (TSRecordEle *) TSListDequeue(list)) { + switch (rec_ele->rec_type) { + case TS_REC_INT: + printf("%s %" PRId64 "\n", rec_ele->rec_name, rec_ele->valueT.int_val); + break; + case TS_REC_COUNTER: + printf("%s %" PRId64 "\n", rec_ele->rec_name, rec_ele->valueT.counter_val); + break; + case TS_REC_FLOAT: + printf("%s %f\n", rec_ele->rec_name, rec_ele->valueT.float_val); + break; + case TS_REC_STRING: + printf("%s %s\n", rec_ele->rec_name, rec_ele->valueT.string_val); + break; + default: + // just skip it ... + break; + } + + TSRecordEleDestroy(rec_ele); + } + + TSListDestroy(list); + return err; + } + } else if (*SetVar != '\0') { // Setting a variable + if (*VarValue == '\0') { + fprintf(stderr, "%s: Set requires a -v argument\n", programName); + return TS_ERR_FAIL; + } else { + TSMgmtError err; + TSActionNeedT action; + + if ((err = TSRecordSet(SetVar, VarValue, &action)) != TS_ERR_OKAY) { + fprintf(stderr, "%s: Please correct your variable name and|or value\n", programName); + return err; + } + + switch (action) { + case TS_ACTION_SHUTDOWN: + printf("Set %s, full shutdown required\n", SetVar); + break; + case TS_ACTION_RESTART: + printf("Set %s, restart required\n", SetVar); + break; + case TS_ACTION_RECONFIGURE: + // printf("Set %s, reconfiguration required\n", SetVar); + break; + case TS_ACTION_DYNAMIC: + default: + printf("Set %s\n", SetVar); + break; + } + + return err; + } + } else if (*VarValue != '\0') { // We have a value but no variable to set + fprintf(stderr, "%s: Must specify variable to set with -s when using -v\n", programName); + return TS_ERR_FAIL; + } + + fprintf(stderr, "%s: No arguments specified\n", programName); + return TS_ERR_FAIL; +} + +int +main(int /* argc ATS_UNUSED */, char **argv) +{ + AppVersionInfo appVersionInfo; + TSMgmtError status; + + 
programName = argv[0]; + + ReadVar[0] = '\0'; + MatchVar[0] = '\0'; + SetVar[0] = '\0'; + VarValue[0] = '\0'; + ReRead = 0; + Shutdown = 0; + BounceCluster = 0; + BounceLocal = 0; + QueryDeadhosts = 0; + Startup = 0; + ShutdownMgmtCluster = 0; + ShutdownMgmtLocal = 0; + ClearCluster = 0; + ClearNode = 0; + ZeroCluster[0] = '\0'; + ZeroNode[0] = '\0'; + VersionFlag = 0; + *StorageCmdOffline = 0; + ShowAlarms = 0; + ShowStatus = 0; + ClearAlarms[0] = '\0'; + + // build the application information structure + appVersionInfo.setup(PACKAGE_NAME,"traffic_line", PACKAGE_VERSION, __DATE__, __TIME__, BUILD_MACHINE, BUILD_PERSON, ""); + +/* Argument description table used to describe how to parse command line args, */ +/* see 'ink_args.h' for meanings of the various fields */ + ArgumentDescription argument_descriptions[] = { + {"query_deadhosts", 'q', "Query congested sites", "F", &QueryDeadhosts, NULL, NULL}, + {"read_var", 'r', "Read Variable", "S1024", &ReadVar, NULL, NULL}, + {"match_var", 'm', "Match Variable", "S1024", &MatchVar, NULL, NULL}, + {"set_var", 's', "Set Variable (requires -v option)", "S1024", &SetVar, NULL, NULL}, + {"value", 'v', "Set Value (used with -s option)", "S1024", &VarValue, NULL, NULL}, + {"help", 'h', "Help", NULL, NULL, NULL, usage}, + {"reread_config", 'x', "Reread Config Files", "F", &ReRead, NULL, NULL}, + {"restart_cluster", 'M', "Restart traffic_manager (cluster wide)", "F", &ShutdownMgmtCluster, NULL, NULL}, + {"restart_local", 'L', "Restart traffic_manager (local node)", "F", &ShutdownMgmtLocal, NULL, NULL}, + {"shutdown", 'S', "Shutdown traffic_server (local node)", "F", &Shutdown, NULL, NULL}, + {"startup", 'U', "Start traffic_server (local node)", "F", &Startup, NULL, NULL}, + {"bounce_cluster", 'B', "Bounce traffic_server (cluster wide)", "F", &BounceCluster, NULL, NULL}, + {"bounce_local", 'b', "Bounce local traffic_server", "F", &BounceLocal, NULL, NULL}, + {"clear_cluster", 'C', "Clear Statistics (cluster wide)", "F", 
&ClearCluster, NULL, NULL}, + {"clear_node", 'c', "Clear Statistics (local node)", "F", &ClearNode, NULL, NULL}, + {"zero_cluster", 'Z', "Zero Specific Statistic (cluster wide)", "S1024", &ZeroCluster, NULL, NULL}, + {"zero_node", 'z', "Zero Specific Statistic (local node)", "S1024", &ZeroNode, NULL, NULL}, + {"offline", '-', "Mark cache storage offline", "S1024", &StorageCmdOffline, NULL, NULL}, + {"alarms", '-', "Show all alarms", "F", &ShowAlarms, NULL, NULL}, + {"clear_alarms", '-', "Clear specified, or all, alarms", "S1024", &ClearAlarms, NULL, NULL}, + {"status", '-', "Show proxy server status", "F", &ShowStatus, NULL, NULL}, + {"version", 'V', "Print Version Id", "T", &VersionFlag, NULL, NULL}, + }; + + // Process command line arguments and dump into variables + process_args(argument_descriptions, countof(argument_descriptions), argv); + + // check for the version number request + if (VersionFlag) { + ink_fputln(stderr, appVersionInfo.FullVersionInfoStr); + exit(0); + } + + // Connect to Local Manager and do it. + if (TS_ERR_OKAY != TSInit(NULL, static_cast(TS_MGMT_OPT_NO_EVENTS | TS_MGMT_OPT_NO_SOCK_TESTS))) { + fprintf(stderr, "error: could not connect to management port, make sure traffic_manager is running\n"); + exit(1); + } + + status = handleArgInvocation(); + + // Done with the mgmt API. + TSTerminate(); + + if (TS_ERR_OKAY != status) { + if (ReadVar[0] == '\0' && SetVar[0] == '\0') + fprintf(stderr, "error: the requested command failed\n"); + exit(1); + } + + exit(0); +} diff --git a/cmd/traffic_top/Makefile.am b/cmd/traffic_top/Makefile.am new file mode 100644 index 00000000..d9fc2e59 --- /dev/null +++ b/cmd/traffic_top/Makefile.am @@ -0,0 +1,41 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# We only build tstop if libcurses and libcurl are available. +if BUILD_TSTOP + +bin_PROGRAMS = traffic_top + +AM_CPPFLAGS = $(iocore_include_dirs) \ + -I$(top_srcdir)/lib -I$(top_builddir)/lib \ + -I$(top_srcdir)/lib/ts -I$(top_builddir)/lib/ts + +traffic_top_CPPFLAGS = \ + -I$(top_srcdir)/mgmt/api/include +traffic_top_CXXFLAGS = \ + @CURL_CFLAGS@ + +traffic_top_SOURCES = traffic_top.cc +traffic_top_LDADD = \ + $(top_builddir)/mgmt/api/libtsmgmtshare.la \ + $(top_builddir)/mgmt/api/libtsmgmt.la \ + @CURL_LIBS@ @CURSES_LIB@ + +# Install a compatibility symlink because traffic_top was named tstop in previous releases. +install-exec-hook: + cd $(DESTDIR)$(bindir) && $(RM) tstop && $(LN_S) traffic_top tstop +endif diff --git a/cmd/traffic_top/Makefile.in b/cmd/traffic_top/Makefile.in new file mode 100644 index 00000000..c4cccb33 --- /dev/null +++ b/cmd/traffic_top/Makefile.in @@ -0,0 +1,922 @@ +# Makefile.in generated by automake 1.13.4 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. 
+ +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) 
;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgincludedir = $(includedir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +@BUILD_TSTOP_TRUE@bin_PROGRAMS = traffic_top$(EXEEXT) +subdir = cmd/traffic_top +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ + $(top_srcdir)/build/aux/depcomp README +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/ax_boost_base.m4 \ + 
$(top_srcdir)/build/ax_check_openssl.m4 \ + $(top_srcdir)/build/ax_compare_version.m4 \ + $(top_srcdir)/build/ax_compiler_vendor.m4 \ + $(top_srcdir)/build/ax_cxx_compile_stdcxx_11.m4 \ + $(top_srcdir)/build/ax_lib_curl.m4 \ + $(top_srcdir)/build/ax_lib_readline.m4 \ + $(top_srcdir)/build/ax_path_generic.m4 \ + $(top_srcdir)/build/ax_perl_modules.m4 \ + $(top_srcdir)/build/ax_with_curses.m4 \ + $(top_srcdir)/build/common.m4 $(top_srcdir)/build/crypto.m4 \ + $(top_srcdir)/build/jemalloc.m4 $(top_srcdir)/build/libtool.m4 \ + $(top_srcdir)/build/ltoptions.m4 \ + $(top_srcdir)/build/ltsugar.m4 \ + $(top_srcdir)/build/ltversion.m4 \ + $(top_srcdir)/build/lt~obsolete.m4 $(top_srcdir)/build/lzma.m4 \ + $(top_srcdir)/build/network.m4 $(top_srcdir)/build/pcre.m4 \ + $(top_srcdir)/build/pkg.m4 $(top_srcdir)/build/tcl.m4 \ + $(top_srcdir)/build/tcmalloc.m4 $(top_srcdir)/build/xml.m4 \ + $(top_srcdir)/build/zlib.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/lib/ts/ink_autoconf.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +am__installdirs = "$(DESTDIR)$(bindir)" +PROGRAMS = $(bin_PROGRAMS) +am__traffic_top_SOURCES_DIST = traffic_top.cc +@BUILD_TSTOP_TRUE@am_traffic_top_OBJECTS = \ +@BUILD_TSTOP_TRUE@ traffic_top-traffic_top.$(OBJEXT) +traffic_top_OBJECTS = $(am_traffic_top_OBJECTS) +@BUILD_TSTOP_TRUE@traffic_top_DEPENDENCIES = \ +@BUILD_TSTOP_TRUE@ $(top_builddir)/mgmt/api/libtsmgmtshare.la \ +@BUILD_TSTOP_TRUE@ $(top_builddir)/mgmt/api/libtsmgmt.la +AM_V_lt = $(am__v_lt_@AM_V@) +am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@) +am__v_lt_0 = --silent +am__v_lt_1 = +traffic_top_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ + $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(traffic_top_CXXFLAGS) \ + $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : 
+AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir)/lib/ts +depcomp = $(SHELL) $(top_srcdir)/build/aux/depcomp +am__depfiles_maybe = depfiles +am__mv = mv -f +CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ + $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) +LTCXXCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ + $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) \ + $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ + $(AM_CXXFLAGS) $(CXXFLAGS) +AM_V_CXX = $(am__v_CXX_@AM_V@) +am__v_CXX_ = $(am__v_CXX_@AM_DEFAULT_V@) +am__v_CXX_0 = @echo " CXX " $@; +am__v_CXX_1 = +CXXLD = $(CXX) +CXXLINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ + $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \ + $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ +AM_V_CXXLD = $(am__v_CXXLD_@AM_V@) +am__v_CXXLD_ = $(am__v_CXXLD_@AM_DEFAULT_V@) +am__v_CXXLD_0 = @echo " CXXLD " $@; +am__v_CXXLD_1 = +SOURCES = $(traffic_top_SOURCES) +DIST_SOURCES = $(am__traffic_top_SOURCES_DIST) +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +# Read a list of newline-separated strings from the standard input, +# and print each of them once, without duplicates. Input order is +# *not* preserved. +am__uniquify_input = $(AWK) '\ + BEGIN { nonempty = 0; } \ + { items[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in items) print i; }; } \ +' +# Make sure the list of sources is unique. This is necessary because, +# e.g., the same source file might be shared among _SOURCES variables +# for different programs/libraries. 
+am__define_uniq_tagged_files = \ + list='$(am__tagged_files)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | $(am__uniquify_input)` +ETAGS = etags +CTAGS = ctags +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +pkgdatadir = @pkgdatadir@ +pkglibdir = @pkglibdir@ +pkglibexecdir = @pkglibexecdir@ +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_CFLAGS = @AM_CFLAGS@ +AM_CXXFLAGS = @AM_CXXFLAGS@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +API_DEFS = @API_DEFS@ +AR = @AR@ +ASCPP = @ASCPP@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +BOOST_CPPFLAGS = @BOOST_CPPFLAGS@ +BOOST_LDFLAGS = @BOOST_LDFLAGS@ +CC = @CC@ +CCACHE = @CCACHE@ +CCAS = @CCAS@ +CCASDEPMODE = @CCASDEPMODE@ +CCASFLAGS = @CCASFLAGS@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CURL_CFLAGS = @CURL_CFLAGS@ +CURL_CONFIG = @CURL_CONFIG@ +CURL_LIBS = @CURL_LIBS@ +CURSES_LIB = @CURSES_LIB@ +CXX = @CXX@ +CXXCPP = @CXXCPP@ +CXXDEPMODE = @CXXDEPMODE@ +CXXFLAGS = @CXXFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +DLLTOOL = @DLLTOOL@ +DOXYGEN = @DOXYGEN@ +DSYMUTIL = @DSYMUTIL@ +DUMPBIN = @DUMPBIN@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +EXPAT_LDFLAGS = @EXPAT_LDFLAGS@ +EXTRA_CC_LDFLAGS = @EXTRA_CC_LDFLAGS@ +EXTRA_CXX_LDFLAGS = @EXTRA_CXX_LDFLAGS@ +FGREP = @FGREP@ +FLEX_CFLAGS = @FLEX_CFLAGS@ +GEOIP_LIBS = @GEOIP_LIBS@ +GREP = @GREP@ +HOST_GUESS = @HOST_GUESS@ +HWLOC_CFLAGS = @HWLOC_CFLAGS@ +HWLOC_LIBS = @HWLOC_LIBS@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +LD = @LD@ +LDFLAGS = @LDFLAGS@ +LEX = @LEX@ +LEXLIB = @LEXLIB@ +LEX_OUTPUT_ROOT = @LEX_OUTPUT_ROOT@ +LIBCAP = @LIBCAP@ +LIBCRYPT = @LIBCRYPT@ +LIBDEMANGLE = @LIBDEMANGLE@ +LIBEXPAT = @LIBEXPAT@ +LIBLZMA = 
@LIBLZMA@ +LIBOBJS = @LIBOBJS@ +LIBPCRE = @LIBPCRE@ +LIBPROFILER = @LIBPROFILER@ +LIBREADLINE = @LIBREADLINE@ +LIBRESOLV = @LIBRESOLV@ +LIBS = @LIBS@ +LIBTCL = @LIBTCL@ +LIBTOOL = @LIBTOOL@ +LIBTOOL_LINK_FLAGS = @LIBTOOL_LINK_FLAGS@ +LIBZ = @LIBZ@ +LIPO = @LIPO@ +LN_S = @LN_S@ +LTLIBOBJS = @LTLIBOBJS@ +LUA_LDFLAGS = @LUA_LDFLAGS@ +MAKEINFO = @MAKEINFO@ +MANIFEST_TOOL = @MANIFEST_TOOL@ +MGMT_DEFS = @MGMT_DEFS@ +MKDIR_P = @MKDIR_P@ +NM = @NM@ +NMEDIT = @NMEDIT@ +OBJDUMP = @OBJDUMP@ +OBJEXT = @OBJEXT@ +OPENSSL_INCLUDES = @OPENSSL_INCLUDES@ +OPENSSL_LDFLAGS = @OPENSSL_LDFLAGS@ +OPENSSL_LIBS = @OPENSSL_LIBS@ +OTOOL = @OTOOL@ +OTOOL64 = @OTOOL64@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PCRE_CONFIG = @PCRE_CONFIG@ +PERL = @PERL@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PYTHON = @PYTHON@ +PYTHON_EXEC_PREFIX = @PYTHON_EXEC_PREFIX@ +PYTHON_PLATFORM = @PYTHON_PLATFORM@ +PYTHON_PREFIX = @PYTHON_PREFIX@ +PYTHON_VERSION = @PYTHON_VERSION@ +RANLIB = @RANLIB@ +RM = @RM@ +SED = @SED@ +SET_MAKE = @SET_MAKE@ +SHARED_CFLAGS = @SHARED_CFLAGS@ +SHARED_CXXFLAGS = @SHARED_CXXFLAGS@ +SHARED_CXXLINKFLAGS = @SHARED_CXXLINKFLAGS@ +SHARED_LDFLAGS = @SHARED_LDFLAGS@ +SHELL = @SHELL@ +SPDYLAY_CFLAGS = @SPDYLAY_CFLAGS@ +SPDYLAY_LIBS = @SPDYLAY_LIBS@ +SPHINXBUILD = @SPHINXBUILD@ +SPHINXOPTS = @SPHINXOPTS@ +STRIP = @STRIP@ +TCL_BIN_DIR = @TCL_BIN_DIR@ +TCL_LIB_FILE = @TCL_LIB_FILE@ +TCL_LIB_FLAG = @TCL_LIB_FLAG@ +TCL_LIB_SPEC = @TCL_LIB_SPEC@ +TCL_PATCH_LEVEL = @TCL_PATCH_LEVEL@ +TCL_SRC_DIR = @TCL_SRC_DIR@ +TCL_STUB_LIB_FILE = @TCL_STUB_LIB_FILE@ +TCL_STUB_LIB_FLAG = @TCL_STUB_LIB_FLAG@ +TCL_STUB_LIB_SPEC = @TCL_STUB_LIB_SPEC@ +TCL_VERSION = @TCL_VERSION@ +TFW_PACKAGE_SUFFIX = @TFW_PACKAGE_SUFFIX@ 
+TS_LIBTOOL_VERSION = @TS_LIBTOOL_VERSION@ +TS_LUAJIT_LDFLAGS = @TS_LUAJIT_LDFLAGS@ +TS_MAN1_MANPAGES = @TS_MAN1_MANPAGES@ +TS_MAN3_MANPAGES = @TS_MAN3_MANPAGES@ +TS_MAN5_MANPAGES = @TS_MAN5_MANPAGES@ +TS_MAN8_MANPAGES = @TS_MAN8_MANPAGES@ +TS_VERSION_MAJOR = @TS_VERSION_MAJOR@ +TS_VERSION_MICRO = @TS_VERSION_MICRO@ +TS_VERSION_MINOR = @TS_VERSION_MINOR@ +TS_VERSION_NUMBER = @TS_VERSION_NUMBER@ +TS_VERSION_STRING = @TS_VERSION_STRING@ +VERSION = @VERSION@ +YACC = @YACC@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_AR = @ac_ct_AR@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_CXX = @ac_ct_CXX@ +ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ +ac_cv_sizeof_voidp = @ac_cv_sizeof_voidp@ +allocah = @allocah@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +arpa_ineth = @arpa_ineth@ +arpa_nameser_compath = @arpa_nameser_compath@ +arpa_nameserh = @arpa_nameserh@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_group = @build_group@ +build_machine = @build_machine@ +build_os = @build_os@ +build_person = @build_person@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +cpioh = @cpioh@ +ctypeh = @ctypeh@ +datadir = @datadir@ +datarootdir = @datarootdir@ +default_loopback_iface = @default_loopback_iface@ +defer_accept = @defer_accept@ +docdir = @docdir@ +dvidir = @dvidir@ +enable_remote_cov_commit = @enable_remote_cov_commit@ +endianh = @endianh@ +exec_prefix = @exec_prefix@ +execinfoh = @execinfoh@ +exp_bindir = @exp_bindir@ +exp_cachedir = @exp_cachedir@ +exp_datadir = @exp_datadir@ +exp_docdir = @exp_docdir@ +exp_exec_prefix = @exp_exec_prefix@ +exp_includedir = @exp_includedir@ +exp_infodir = @exp_infodir@ +exp_installbuilddir = @exp_installbuilddir@ +exp_libdir = @exp_libdir@ +exp_libexecdir = @exp_libexecdir@ +exp_localstatedir = 
@exp_localstatedir@ +exp_logdir = @exp_logdir@ +exp_mandir = @exp_mandir@ +exp_prefix = @exp_prefix@ +exp_runtimedir = @exp_runtimedir@ +exp_sbindir = @exp_sbindir@ +exp_sysconfdir = @exp_sysconfdir@ +expath = @expath@ +floath = @floath@ +gethostbyname_r_glibc2 = @gethostbyname_r_glibc2@ +has_128bit_cas = @has_128bit_cas@ +has_backtrace = @has_backtrace@ +has_in6_is_addr_unspecified = @has_in6_is_addr_unspecified@ +has_interim_cache = @has_interim_cache@ +has_ip_tos = @has_ip_tos@ +has_profiler = @has_profiler@ +has_so_mark = @has_so_mark@ +has_spdy = @has_spdy@ +has_tcmalloc = @has_tcmalloc@ +has_tests = @has_tests@ +has_wccp = @has_wccp@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +ifaddrsh = @ifaddrsh@ +includedir = @includedir@ +infodir = @infodir@ +ink_with_modules_local = @ink_with_modules_local@ +ink_with_modules_process = @ink_with_modules_process@ +install_sh = @install_sh@ +installbuilddir = @installbuilddir@ +iocore_include_dirs = @iocore_include_dirs@ +ip_transparent = @ip_transparent@ +jemalloch = @jemalloch@ +libdir = @libdir@ +libexecdir = @libexecdir@ +libgenh = @libgenh@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +lzmah = @lzmah@ +machine_endianh = @machine_endianh@ +malloch = @malloch@ +mandir = @mandir@ +mathh = @mathh@ +max_api_stats = @max_api_stats@ +max_event_threads = @max_event_threads@ +max_host_name_len = @max_host_name_len@ +max_threads_per_type = @max_threads_per_type@ +mkdir_p = @mkdir_p@ +net_ppp_defsh = @net_ppp_defsh@ +netdbh = @netdbh@ +netinet_in_systmh = @netinet_in_systmh@ +netinet_inh = @netinet_inh@ +netinet_ip_icmph = @netinet_ip_icmph@ +netinet_iph = @netinet_iph@ +netinet_tcph = @netinet_tcph@ +oldincludedir = @oldincludedir@ +pcre_pcreh = @pcre_pcreh@ +pcreh = @pcreh@ +pdfdir = @pdfdir@ +pkgbindir = @pkgbindir@ +pkgcachedir = @pkgcachedir@ +pkgdocdir = @pkgdocdir@ +pkglocalstatedir = 
@pkglocalstatedir@ +pkglogdir = @pkglogdir@ +pkgpyexecdir = @pkgpyexecdir@ +pkgpythondir = @pkgpythondir@ +pkgruntimedir = @pkgruntimedir@ +pkgsbindir = @pkgsbindir@ +pkgsysconfdir = @pkgsysconfdir@ +pkgsysgroup = @pkgsysgroup@ +pkgsysuser = @pkgsysuser@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +pthread_nph = @pthread_nph@ +pyexecdir = @pyexecdir@ +pythondir = @pythondir@ +readline_readlineh = @readline_readlineh@ +rel_bindir = @rel_bindir@ +rel_cachedir = @rel_cachedir@ +rel_datadir = @rel_datadir@ +rel_docdir = @rel_docdir@ +rel_exec_prefix = @rel_exec_prefix@ +rel_includedir = @rel_includedir@ +rel_infodir = @rel_infodir@ +rel_installbuilddir = @rel_installbuilddir@ +rel_libdir = @rel_libdir@ +rel_libexecdir = @rel_libexecdir@ +rel_localstatedir = @rel_localstatedir@ +rel_logdir = @rel_logdir@ +rel_mandir = @rel_mandir@ +rel_prefix = @rel_prefix@ +rel_runtimedir = @rel_runtimedir@ +rel_sbindir = @rel_sbindir@ +rel_sysconfdir = @rel_sysconfdir@ +runtimedir = @runtimedir@ +sbindir = @sbindir@ +schedh = @schedh@ +sharedstatedir = @sharedstatedir@ +siginfoh = @siginfoh@ +srcdir = @srcdir@ +stroptsh = @stroptsh@ +sys_byteorderh = @sys_byteorderh@ +sys_cpuseth = @sys_cpuseth@ +sys_epollh = @sys_epollh@ +sys_eventh = @sys_eventh@ +sys_ioctlh = @sys_ioctlh@ +sys_mounth = @sys_mounth@ +sys_paramh = @sys_paramh@ +sys_pseth = @sys_pseth@ +sys_sockioh = @sys_sockioh@ +sys_sysctlh = @sys_sysctlh@ +sys_sysinfoh = @sys_sysinfoh@ +sys_sysmacrosh = @sys_sysmacrosh@ +sys_systeminfoh = @sys_systeminfoh@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +use_diags = @use_diags@ +use_epoll = @use_epoll@ +use_fast_sdk = @use_fast_sdk@ +use_freelist = @use_freelist@ +use_hwloc = @use_hwloc@ +use_kqueue = @use_kqueue@ +use_linux_native_aio = @use_linux_native_aio@ +use_port = @use_port@ +use_posix_cap = @use_posix_cap@ 
+use_reclaimable_freelist = @use_reclaimable_freelist@ +use_tls_alpn = @use_tls_alpn@ +use_tls_eckey = @use_tls_eckey@ +use_tls_npn = @use_tls_npn@ +use_tls_sni = @use_tls_sni@ +use_tproxy = @use_tproxy@ +valuesh = @valuesh@ +waith = @waith@ +zlibh = @zlibh@ +@BUILD_TSTOP_TRUE@AM_CPPFLAGS = $(iocore_include_dirs) \ +@BUILD_TSTOP_TRUE@ -I$(top_srcdir)/lib -I$(top_builddir)/lib \ +@BUILD_TSTOP_TRUE@ -I$(top_srcdir)/lib/ts -I$(top_builddir)/lib/ts + +@BUILD_TSTOP_TRUE@traffic_top_CPPFLAGS = \ +@BUILD_TSTOP_TRUE@ -I$(top_srcdir)/mgmt/api/include + +@BUILD_TSTOP_TRUE@traffic_top_CXXFLAGS = \ +@BUILD_TSTOP_TRUE@ @CURL_CFLAGS@ + +@BUILD_TSTOP_TRUE@traffic_top_SOURCES = traffic_top.cc +@BUILD_TSTOP_TRUE@traffic_top_LDADD = \ +@BUILD_TSTOP_TRUE@ $(top_builddir)/mgmt/api/libtsmgmtshare.la \ +@BUILD_TSTOP_TRUE@ $(top_builddir)/mgmt/api/libtsmgmt.la \ +@BUILD_TSTOP_TRUE@ @CURL_LIBS@ @CURSES_LIB@ + +all: all-am + +.SUFFIXES: +.SUFFIXES: .cc .lo .o .obj +$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign cmd/traffic_top/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --foreign cmd/traffic_top/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +install-binPROGRAMS: $(bin_PROGRAMS) + @$(NORMAL_INSTALL) + @list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(bindir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(bindir)" || exit 1; \ + fi; \ + for p in $$list; do echo "$$p $$p"; done | \ + sed 's/$(EXEEXT)$$//' | \ + while read p p1; do if test -f $$p \ + || test -f $$p1 \ + ; then echo "$$p"; echo "$$p"; else :; fi; \ + done | \ + sed -e 'p;s,.*/,,;n;h' \ + -e 's|.*|.|' \ + -e 'p;x;s,.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/' | \ + sed 'N;N;N;s,\n, ,g' | \ + $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1 } \ + { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ + if ($$2 == $$4) files[d] = files[d] " " $$1; \ + else { print "f", $$3 "/" $$4, $$1; } } \ + END { for (d in files) print "f", d, files[d] }' | \ + while read type dir files; do \ + if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ + test -z "$$files" || { \ + echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files '$(DESTDIR)$(bindir)$$dir'"; \ + $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files "$(DESTDIR)$(bindir)$$dir" || exit $$?; \ + } \ + ; done + +uninstall-binPROGRAMS: + 
@$(NORMAL_UNINSTALL) + @list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \ + files=`for p in $$list; do echo "$$p"; done | \ + sed -e 'h;s,^.*/,,;s/$(EXEEXT)$$//;$(transform)' \ + -e 's/$$/$(EXEEXT)/' \ + `; \ + test -n "$$list" || exit 0; \ + echo " ( cd '$(DESTDIR)$(bindir)' && rm -f" $$files ")"; \ + cd "$(DESTDIR)$(bindir)" && rm -f $$files + +clean-binPROGRAMS: + @list='$(bin_PROGRAMS)'; test -n "$$list" || exit 0; \ + echo " rm -f" $$list; \ + rm -f $$list || exit $$?; \ + test -n "$(EXEEXT)" || exit 0; \ + list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ + echo " rm -f" $$list; \ + rm -f $$list + +traffic_top$(EXEEXT): $(traffic_top_OBJECTS) $(traffic_top_DEPENDENCIES) $(EXTRA_traffic_top_DEPENDENCIES) + @rm -f traffic_top$(EXEEXT) + $(AM_V_CXXLD)$(traffic_top_LINK) $(traffic_top_OBJECTS) $(traffic_top_LDADD) $(LIBS) + +mostlyclean-compile: + -rm -f *.$(OBJEXT) + +distclean-compile: + -rm -f *.tab.c + +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/traffic_top-traffic_top.Po@am__quote@ + +.cc.o: +@am__fastdepCXX_TRUE@ $(AM_V_CXX)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.o$$||'`;\ +@am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\ +@am__fastdepCXX_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Po +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ $< + +.cc.obj: +@am__fastdepCXX_TRUE@ $(AM_V_CXX)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.obj$$||'`;\ +@am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ `$(CYGPATH_W) '$<'` &&\ +@am__fastdepCXX_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Po +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) 
$(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` + +.cc.lo: +@am__fastdepCXX_TRUE@ $(AM_V_CXX)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.lo$$||'`;\ +@am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\ +@am__fastdepCXX_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Plo +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LTCXXCOMPILE) -c -o $@ $< + +traffic_top-traffic_top.o: traffic_top.cc +@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(traffic_top_CPPFLAGS) $(CPPFLAGS) $(traffic_top_CXXFLAGS) $(CXXFLAGS) -MT traffic_top-traffic_top.o -MD -MP -MF $(DEPDIR)/traffic_top-traffic_top.Tpo -c -o traffic_top-traffic_top.o `test -f 'traffic_top.cc' || echo '$(srcdir)/'`traffic_top.cc +@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/traffic_top-traffic_top.Tpo $(DEPDIR)/traffic_top-traffic_top.Po +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='traffic_top.cc' object='traffic_top-traffic_top.o' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(traffic_top_CPPFLAGS) $(CPPFLAGS) $(traffic_top_CXXFLAGS) $(CXXFLAGS) -c -o traffic_top-traffic_top.o `test -f 'traffic_top.cc' || echo '$(srcdir)/'`traffic_top.cc + +traffic_top-traffic_top.obj: traffic_top.cc +@am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(traffic_top_CPPFLAGS) $(CPPFLAGS) $(traffic_top_CXXFLAGS) $(CXXFLAGS) -MT traffic_top-traffic_top.obj -MD -MP -MF $(DEPDIR)/traffic_top-traffic_top.Tpo -c -o traffic_top-traffic_top.obj `if test -f 'traffic_top.cc'; then $(CYGPATH_W) 
'traffic_top.cc'; else $(CYGPATH_W) '$(srcdir)/traffic_top.cc'; fi` +@am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/traffic_top-traffic_top.Tpo $(DEPDIR)/traffic_top-traffic_top.Po +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='traffic_top.cc' object='traffic_top-traffic_top.obj' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(traffic_top_CPPFLAGS) $(CPPFLAGS) $(traffic_top_CXXFLAGS) $(CXXFLAGS) -c -o traffic_top-traffic_top.obj `if test -f 'traffic_top.cc'; then $(CYGPATH_W) 'traffic_top.cc'; else $(CYGPATH_W) '$(srcdir)/traffic_top.cc'; fi` + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs + +ID: $(am__tagged_files) + $(am__define_uniq_tagged_files); mkid -fID $$unique +tags: tags-am +TAGS: tags + +tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + set x; \ + here=`pwd`; \ + $(am__define_uniq_tagged_files); \ + shift; \ + if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + if test $$# -gt 0; then \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + "$$@" $$unique; \ + else \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$unique; \ + fi; \ + fi +ctags: ctags-am + +CTAGS: ctags +ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + $(am__define_uniq_tagged_files); \ + test -z "$(CTAGS_ARGS)$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && $(am__cd) $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) "$$here" +cscopelist: cscopelist-am + +cscopelist-am: $(am__tagged_files) + list='$(am__tagged_files)'; \ + case "$(srcdir)" in \ + [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ + *) sdir=$(subdir)/$(srcdir) ;; \ + esac; \ + for i in $$list; do \ + if test -f "$$i"; then \ + echo "$(subdir)/$$i"; \ + else \ + 
echo "$$sdir/$$i"; \ + fi; \ + done >> $(top_builddir)/cscope.files + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(PROGRAMS) +installdirs: + for dir in "$(DESTDIR)$(bindir)"; do \ + test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + done +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+@BUILD_TSTOP_FALSE@install-exec-hook: +clean: clean-am + +clean-am: clean-binPROGRAMS clean-generic clean-libtool mostlyclean-am + +distclean: distclean-am + -rm -rf ./$(DEPDIR) + -rm -f Makefile +distclean-am: clean-am distclean-compile distclean-generic \ + distclean-tags + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: install-binPROGRAMS + @$(NORMAL_INSTALL) + $(MAKE) $(AM_MAKEFLAGS) install-exec-hook +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -rf ./$(DEPDIR) + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-compile mostlyclean-generic \ + mostlyclean-libtool + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: uninstall-binPROGRAMS + +.MAKE: install-am install-exec-am install-strip + +.PHONY: CTAGS GTAGS TAGS all all-am check check-am clean \ + clean-binPROGRAMS clean-generic clean-libtool cscopelist-am \ + ctags ctags-am distclean distclean-compile distclean-generic \ + distclean-libtool distclean-tags distdir dvi dvi-am html \ + html-am info info-am install install-am install-binPROGRAMS \ + install-data install-data-am install-dvi install-dvi-am \ + install-exec install-exec-am install-exec-hook install-html \ + install-html-am install-info install-info-am install-man \ + install-pdf install-pdf-am install-ps install-ps-am \ + install-strip installcheck installcheck-am installdirs \ + maintainer-clean maintainer-clean-generic mostlyclean \ + mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ + pdf pdf-am ps ps-am tags tags-am uninstall uninstall-am \ + uninstall-binPROGRAMS + + +# 
Install a compatibility symlink because traffic_top was named tstop in previous releases. +@BUILD_TSTOP_TRUE@install-exec-hook: +@BUILD_TSTOP_TRUE@ cd $(DESTDIR)$(bindir) && $(RM) tstop && $(LN_S) traffic_top tstop + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/cmd/traffic_top/README b/cmd/traffic_top/README new file mode 100644 index 00000000..10f33b70 --- /dev/null +++ b/cmd/traffic_top/README @@ -0,0 +1,4 @@ +Top type program for Apache Traffic Server that displays common +statistical information about the server. Requires the server to be +running the stats_over_http plugin. + diff --git a/cmd/traffic_top/stats.h b/cmd/traffic_top/stats.h new file mode 100644 index 00000000..860342f1 --- /dev/null +++ b/cmd/traffic_top/stats.h @@ -0,0 +1,456 @@ +/** @file + + Include file for the tstop stats. + + @section license License + + Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +#define __STDC_FORMAT_MACROS 1 + +#include +#include +#include +#include +#include +#include +#include +#include "mgmtapi.h" + +using namespace std; + +struct LookupItem { + LookupItem(const char *s, const char *n, const int t): pretty(s), name(n), type(t) {} + LookupItem(const char *s, const char *n, const char *d, const int t): pretty(s), name(n), numerator(n), denominator(d), type(t) {} + const char *pretty; + const char *name; + const char *numerator; + const char *denominator; + int type; +}; +extern size_t write_data(void *ptr, size_t size, size_t nmemb, void *stream); +extern char curl_error[CURL_ERROR_SIZE]; +extern string response; + +namespace constant { + const char global[] = "\"global\": {\n"; + const char start[] = "\"proxy.process."; + const char seperator[] = "\": \""; + const char end[] = "\",\n"; +}; + + +//---------------------------------------------------------------------------- +class Stats { +public: + Stats(const string &url): _url(url) { + + if (url != "") { + if (_url.substr(0, 4) != "http") { + // looks like it is a host using it the old way + _url = "http://" + _url + "/_stats"; + } + + // set the host + size_t start = _url.find(":"); + size_t end = _url.find("/", start + 3); + _host = _url.substr(start + 3, end - start - 3); + end = _host.find(":"); + if (end != string::npos) { + _host = _host.substr(0, end); + } + } else { + char hostname[25]; + hostname[sizeof(hostname) - 1] = '\0'; + gethostname(hostname, sizeof(hostname) - 1); + _host = hostname; + } + + _stats = NULL; + _old_stats = NULL; + _absolute = false; + lookup_table.insert(make_pair("version", LookupItem("Version", "proxy.process.version.server.short", 1))); + lookup_table.insert(make_pair("disk_used", LookupItem("Disk Used", "proxy.process.cache.bytes_used", 1))); + lookup_table.insert(make_pair("disk_total", LookupItem("Disk Total", "proxy.process.cache.bytes_total", 1))); + lookup_table.insert(make_pair("ram_used", LookupItem("Ram Used", 
"proxy.process.cache.ram_cache.bytes_used", 1))); + lookup_table.insert(make_pair("ram_total", LookupItem("Ram Total", "proxy.process.cache.ram_cache.total_bytes", 1))); + lookup_table.insert(make_pair("lookups", LookupItem("Lookups", "proxy.process.http.cache_lookups", 2))); + lookup_table.insert(make_pair("cache_writes", LookupItem("Writes", "proxy.process.http.cache_writes", 2))); + lookup_table.insert(make_pair("cache_updates", LookupItem("Updates", "proxy.process.http.cache_updates", 2))); + lookup_table.insert(make_pair("cache_deletes", LookupItem("Deletes", "proxy.process.http.cache_deletes", 2))); + lookup_table.insert(make_pair("read_active", LookupItem("Read Active", "proxy.process.cache.read.active", 1))); + lookup_table.insert(make_pair("write_active", LookupItem("Writes Active", "proxy.process.cache.write.active", 1))); + lookup_table.insert(make_pair("update_active", LookupItem("Update Active", "proxy.process.cache.update.active", 1))); + lookup_table.insert(make_pair("entries", LookupItem("Entries", "proxy.process.cache.direntries.used", 1))); + lookup_table.insert(make_pair("avg_size", LookupItem("Avg Size", "disk_used", "entries", 3))); + + lookup_table.insert(make_pair("dns_time", LookupItem("DNS Time", "proxy.node.dns.lookup_avg_time_ms", 2))); + lookup_table.insert(make_pair("dns_hits", LookupItem("DNS hits", "proxy.node.hostdb.total_hits", 2))); + lookup_table.insert(make_pair("dns_lookups", LookupItem("DNS lookups", "proxy.node.hostdb.total_lookups", 2))); + lookup_table.insert(make_pair("client_req", LookupItem("Requests", "proxy.process.http.incoming_requests", 2))); + lookup_table.insert(make_pair("client_conn", LookupItem("New Conn", "proxy.process.http.total_client_connections", 2))); + lookup_table.insert(make_pair("client_req_conn", LookupItem("Req/Conn", "client_req", "client_conn", 3))); + lookup_table.insert(make_pair("client_curr_conn", LookupItem("Curr Conn", "proxy.process.http.current_client_connections", 1))); + 
lookup_table.insert(make_pair("client_actv_conn", LookupItem("Active Con", "proxy.process.http.current_active_client_connections", 1))); + + lookup_table.insert(make_pair("server_req", LookupItem("Requests", "proxy.process.http.outgoing_requests", 2))); + lookup_table.insert(make_pair("server_conn", LookupItem("New Conn", "proxy.process.http.total_server_connections", 2))); + lookup_table.insert(make_pair("server_req_conn", LookupItem("Req/Conn", "server_req", "server_conn", 3))); + lookup_table.insert(make_pair("server_curr_conn", LookupItem("Curr Conn", "proxy.process.http.current_server_connections", 1))); + + + lookup_table.insert(make_pair("client_head", LookupItem("Head Bytes", "proxy.process.http.user_agent_response_header_total_size", 2))); + lookup_table.insert(make_pair("client_body", LookupItem("Body Bytes", "proxy.process.http.user_agent_response_document_total_size", 2))); + lookup_table.insert(make_pair("server_head", LookupItem("Head Bytes", "proxy.process.http.origin_server_response_header_total_size", 2))); + lookup_table.insert(make_pair("server_body", LookupItem("Body Bytes", "proxy.process.http.origin_server_response_document_total_size", 2))); + + // not used directly + lookup_table.insert(make_pair("ram_hit", LookupItem("Ram Hit", "proxy.process.cache.ram_cache.hits", 2))); + lookup_table.insert(make_pair("ram_miss", LookupItem("Ram Misses", "proxy.process.cache.ram_cache.misses", 2))); + + + + lookup_table.insert(make_pair("client_abort", LookupItem("Clnt Abort", "proxy.process.http.err_client_abort_count_stat", 2))); + lookup_table.insert(make_pair("conn_fail", LookupItem("Conn Fail", "proxy.process.http.err_connect_fail_count_stat", 2))); + lookup_table.insert(make_pair("abort", LookupItem("Abort", "proxy.process.http.transaction_counts.errors.aborts", 2))); + lookup_table.insert(make_pair("t_conn_fail", LookupItem("Conn Fail", "proxy.process.http.transaction_counts.errors.connect_failed", 2))); + lookup_table.insert(make_pair("other_err", 
LookupItem("Other Err", "proxy.process.http.transaction_counts.errors.other", 2))); + // percentage + lookup_table.insert(make_pair("ram_ratio", LookupItem("Ram Hit", "ram_hit", "ram_hit_miss", 4))); + lookup_table.insert(make_pair("dns_ratio", LookupItem("DNS Hit", "dns_hits", "dns_lookups", 4))); + + // percentage of requests + lookup_table.insert(make_pair("fresh", LookupItem("Fresh", "proxy.process.http.transaction_counts.hit_fresh", 5))); + lookup_table.insert(make_pair("reval", LookupItem("Revalidate", "proxy.process.http.transaction_counts.hit_revalidated", 5))); + lookup_table.insert(make_pair("cold", LookupItem("Cold", "proxy.process.http.transaction_counts.miss_cold", 5))); + lookup_table.insert(make_pair("changed", LookupItem("Changed", "proxy.process.http.transaction_counts.miss_changed", 5))); + lookup_table.insert(make_pair("not", LookupItem("Not Cache", "proxy.process.http.transaction_counts.miss_not_cacheable", 5))); + lookup_table.insert(make_pair("no", LookupItem("No Cache", "proxy.process.http.transaction_counts.miss_client_no_cache", 5))); + + lookup_table.insert(make_pair("fresh_time", LookupItem("Fresh (ms)", "proxy.process.http.transaction_totaltime.hit_fresh", "fresh", 8))); + lookup_table.insert(make_pair("reval_time", LookupItem("Reval (ms)", "proxy.process.http.transaction_totaltime.hit_revalidated", "reval", 8))); + lookup_table.insert(make_pair("cold_time", LookupItem("Cold (ms)", "proxy.process.http.transaction_totaltime.miss_cold", "cold", 8))); + lookup_table.insert(make_pair("changed_time", LookupItem("Chang (ms)", "proxy.process.http.transaction_totaltime.miss_changed", "changed", 8))); + lookup_table.insert(make_pair("not_time", LookupItem("Not (ms)", "proxy.process.http.transaction_totaltime.miss_not_cacheable", "not", 8))); + lookup_table.insert(make_pair("no_time", LookupItem("No (ms)", "proxy.process.http.transaction_totaltime.miss_client_no_cache", "no", 8))); + + lookup_table.insert(make_pair("get", LookupItem("GET", 
"proxy.process.http.get_requests", 5))); + lookup_table.insert(make_pair("head", LookupItem("HEAD", "proxy.process.http.head_requests", 5))); + lookup_table.insert(make_pair("post", LookupItem("POST", "proxy.process.http.post_requests", 5))); + + lookup_table.insert(make_pair("100", LookupItem("100", "proxy.process.http.100_responses", 5))); + lookup_table.insert(make_pair("101", LookupItem("101", "proxy.process.http.101_responses", 5))); + lookup_table.insert(make_pair("1xx", LookupItem("1xx", "proxy.process.http.1xx_responses", 5))); + lookup_table.insert(make_pair("200", LookupItem("200", "proxy.process.http.200_responses", 5))); + lookup_table.insert(make_pair("201", LookupItem("201", "proxy.process.http.201_responses", 5))); + lookup_table.insert(make_pair("202", LookupItem("202", "proxy.process.http.202_responses", 5))); + lookup_table.insert(make_pair("203", LookupItem("203", "proxy.process.http.203_responses", 5))); + lookup_table.insert(make_pair("204", LookupItem("204", "proxy.process.http.204_responses", 5))); + lookup_table.insert(make_pair("205", LookupItem("205", "proxy.process.http.205_responses", 5))); + lookup_table.insert(make_pair("206", LookupItem("206", "proxy.process.http.206_responses", 5))); + lookup_table.insert(make_pair("2xx", LookupItem("2xx", "proxy.process.http.2xx_responses", 5))); + lookup_table.insert(make_pair("300", LookupItem("300", "proxy.process.http.300_responses", 5))); + lookup_table.insert(make_pair("301", LookupItem("301", "proxy.process.http.301_responses", 5))); + lookup_table.insert(make_pair("302", LookupItem("302", "proxy.process.http.302_responses", 5))); + lookup_table.insert(make_pair("303", LookupItem("303", "proxy.process.http.303_responses", 5))); + lookup_table.insert(make_pair("304", LookupItem("304", "proxy.process.http.304_responses", 5))); + lookup_table.insert(make_pair("305", LookupItem("305", "proxy.process.http.305_responses", 5))); + lookup_table.insert(make_pair("307", LookupItem("307", 
"proxy.process.http.307_responses", 5))); + lookup_table.insert(make_pair("3xx", LookupItem("3xx", "proxy.process.http.3xx_responses", 5))); + lookup_table.insert(make_pair("400", LookupItem("400", "proxy.process.http.400_responses", 5))); + lookup_table.insert(make_pair("401", LookupItem("401", "proxy.process.http.401_responses", 5))); + lookup_table.insert(make_pair("402", LookupItem("402", "proxy.process.http.402_responses", 5))); + lookup_table.insert(make_pair("403", LookupItem("403", "proxy.process.http.403_responses", 5))); + lookup_table.insert(make_pair("404", LookupItem("404", "proxy.process.http.404_responses", 5))); + lookup_table.insert(make_pair("405", LookupItem("405", "proxy.process.http.405_responses", 5))); + lookup_table.insert(make_pair("406", LookupItem("406", "proxy.process.http.406_responses", 5))); + lookup_table.insert(make_pair("407", LookupItem("407", "proxy.process.http.407_responses", 5))); + lookup_table.insert(make_pair("408", LookupItem("408", "proxy.process.http.408_responses", 5))); + lookup_table.insert(make_pair("409", LookupItem("409", "proxy.process.http.409_responses", 5))); + lookup_table.insert(make_pair("410", LookupItem("410", "proxy.process.http.410_responses", 5))); + lookup_table.insert(make_pair("411", LookupItem("411", "proxy.process.http.411_responses", 5))); + lookup_table.insert(make_pair("412", LookupItem("412", "proxy.process.http.412_responses", 5))); + lookup_table.insert(make_pair("413", LookupItem("413", "proxy.process.http.413_responses", 5))); + lookup_table.insert(make_pair("414", LookupItem("414", "proxy.process.http.414_responses", 5))); + lookup_table.insert(make_pair("415", LookupItem("415", "proxy.process.http.415_responses", 5))); + lookup_table.insert(make_pair("416", LookupItem("416", "proxy.process.http.416_responses", 5))); + lookup_table.insert(make_pair("4xx", LookupItem("4xx", "proxy.process.http.4xx_responses", 5))); + lookup_table.insert(make_pair("500", LookupItem("500", 
"proxy.process.http.500_responses", 5))); + lookup_table.insert(make_pair("501", LookupItem("501", "proxy.process.http.501_responses", 5))); + lookup_table.insert(make_pair("502", LookupItem("502", "proxy.process.http.502_responses", 5))); + lookup_table.insert(make_pair("503", LookupItem("503", "proxy.process.http.503_responses", 5))); + lookup_table.insert(make_pair("504", LookupItem("504", "proxy.process.http.504_responses", 5))); + lookup_table.insert(make_pair("505", LookupItem("505", "proxy.process.http.505_responses", 5))); + lookup_table.insert(make_pair("5xx", LookupItem("5xx", "proxy.process.http.5xx_responses", 5))); + + lookup_table.insert(make_pair("s_100", LookupItem("100 B", "proxy.process.http.response_document_size_100", 5))); + lookup_table.insert(make_pair("s_1k", LookupItem("1 KB", "proxy.process.http.response_document_size_1K", 5))); + lookup_table.insert(make_pair("s_3k", LookupItem("3 KB", "proxy.process.http.response_document_size_3K", 5))); + lookup_table.insert(make_pair("s_5k", LookupItem("5 KB", "proxy.process.http.response_document_size_5K", 5))); + lookup_table.insert(make_pair("s_10k", LookupItem("10 KB", "proxy.process.http.response_document_size_10K", 5))); + lookup_table.insert(make_pair("s_1m", LookupItem("1 MB", "proxy.process.http.response_document_size_1M", 5))); + lookup_table.insert(make_pair("s_>1m", LookupItem("> 1 MB", "proxy.process.http.response_document_size_inf", 5))); + + + // sum together + lookup_table.insert(make_pair("ram_hit_miss", LookupItem("Ram Hit+Miss", "ram_hit", "ram_miss", 6))); + lookup_table.insert(make_pair("client_net", LookupItem("Net (bits)", "client_head", "client_body", 7))); + lookup_table.insert(make_pair("client_size", LookupItem("Total Size", "client_head", "client_body", 6))); + lookup_table.insert(make_pair("client_avg_size", LookupItem("Avg Size", "client_size", "client_req", 3))); + + lookup_table.insert(make_pair("server_net", LookupItem("Net (bits)", "server_head", "server_body", 7))); + 
lookup_table.insert(make_pair("server_size", LookupItem("Total Size", "server_head", "server_body", 6))); + lookup_table.insert(make_pair("server_avg_size", LookupItem("Avg Size", "server_size", "server_req", 3))); + + + lookup_table.insert(make_pair("total_time", LookupItem("Total Time", "proxy.process.http.total_transactions_time", 2))); + lookup_table.insert(make_pair("client_req_time", LookupItem("Resp (ms)", "total_time", "client_req", 3))); + } + + void getStats() { + + if (_url == "") { + int64_t value = 0; + if (_old_stats != NULL) { + delete _old_stats; + _old_stats = NULL; + } + _old_stats = _stats; + _stats = new map; + + gettimeofday(&_time, NULL); + double now = _time.tv_sec + (double)_time.tv_usec / 1000000; + + for (map::const_iterator lookup_it = lookup_table.begin(); + lookup_it != lookup_table.end(); ++lookup_it) { + const LookupItem &item = lookup_it->second; + + if (item.type == 1 || item.type == 2 || item.type == 5 || item.type == 8) { + if (strcmp(item.pretty, "Version") == 0) { + // special case for Version information + TSString strValue = NULL; + assert(TSRecordGetString(item.name, &strValue) == TS_ERR_OKAY); + string key = item.name; + (*_stats)[key] = strValue; + } else { + assert(TSRecordGetInt(item.name, &value) == TS_ERR_OKAY); + string key = item.name; + char buffer[32]; + sprintf(buffer, "%" PRId64, value); + string foo = buffer; + (*_stats)[key] = foo; + } + } + } + _old_time = _now; + _now = now; + _time_diff = _now - _old_time; + } else { + CURL *curl; + CURLcode res; + + curl = curl_easy_init(); + if (curl) { + curl_easy_setopt(curl, CURLOPT_URL, _url.c_str()); + curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, write_data); + curl_easy_setopt(curl, CURLOPT_ERRORBUFFER, curl_error); + + // update time + gettimeofday(&_time, NULL); + double now = _time.tv_sec + (double)_time.tv_usec / 1000000; + + response.clear(); + response.reserve(32768); // should hopefully be smaller then 32KB + res = curl_easy_perform(curl); + + // only if 
success update stats and time information + if (res == 0) { + if (_old_stats != NULL) { + delete _old_stats; + _old_stats = NULL; + } + _old_stats = _stats; + _stats = new map; + + // parse + parseResponse(response); + _old_time = _now; + _now = now; + _time_diff = _now - _old_time; + } else { + fprintf(stderr, "Can't fetch url %s", _url.c_str()); + abort(); + } + + /* always cleanup */ + curl_easy_cleanup(curl); + } + } + } + + int64_t getValue(const string &key, const map *stats) const { + map::const_iterator stats_it = stats->find(key); + if (stats_it == stats->end()) + return 0; + int64_t value = atoll(stats_it->second.c_str()); + return value; + } + + void getStat(const string &key, double &value, int overrideType = 0) { + string strtmp; + int typetmp; + getStat(key, value, strtmp, typetmp, overrideType); + } + + + void getStat(const string &key, string &value) { + map::const_iterator lookup_it = lookup_table.find(key); + assert(lookup_it != lookup_table.end()); + const LookupItem &item = lookup_it->second; + + map::const_iterator stats_it = _stats->find(item.name); + if (stats_it == _stats->end()) + value = ""; + else + value = stats_it->second.c_str(); + } + + void getStat(const string &key, double &value, string &prettyName, int &type, int overrideType = 0) { + map::const_iterator lookup_it = lookup_table.find(key); + assert(lookup_it != lookup_table.end()); + const LookupItem &item = lookup_it->second; + prettyName = item.pretty; + if (overrideType != 0) + type = overrideType; + else + type = item.type; + + if (type == 1 || type == 2 || type == 5 || type == 8) { + value = getValue(item.name, _stats); + if (key == "total_time") { + value = value / 10000000; + } + + if ((type == 2 || type == 5 || type == 8) && _old_stats != NULL && _absolute == false) { + double old = getValue(item.name, _old_stats); + if (key == "total_time") { + old = old / 10000000; + } + value = (value - old) / _time_diff; + } + } else if (type == 3 || type == 4) { + double numerator; + 
double denominator; + getStat(item.numerator, numerator); + getStat(item.denominator, denominator); + if (denominator == 0) + value = 0; + else + value = numerator / denominator; + if (type == 4) { + value *= 100; + } + } else if (type == 6 || type == 7) { + double numerator; + double denominator; + getStat(item.numerator, numerator, 2); + getStat(item.denominator, denominator, 2); + value = numerator + denominator; + if (type == 7) + value *= 8; + } + + if (type == 8) { + double denominator; + getStat(item.denominator, denominator, 2); + if (denominator == 0) + value = 0; + else + value = value / denominator * 1000; + } + + if (type == 5) { + double denominator = 0; + getStat("client_req", denominator); + if (denominator == 0) + value = 0; + else + value = value / denominator * 100; + } + } + + bool toggleAbsolute() { + if (_absolute == true) + _absolute = false; + else + _absolute = true; + + return _absolute; + } + + void parseResponse(const string &response) { + // move past global + size_t pos = response.find(constant::global); + pos += sizeof(constant::global) - 1; + + // find parts of the line + while (1) { + size_t start = response.find(constant::start, pos); + size_t seperator = response.find(constant::seperator, pos); + size_t end = response.find(constant::end, pos); + + if (start == string::npos || seperator == string::npos || end == string::npos) + return; + + //cout << constant::start << " " << start << endl; + //cout << constant::seperator << " " << seperator << endl; + //cout << constant::end << " " << end << endl; + + string key = response.substr(start + 1, seperator - start - 1); + string value = response.substr(seperator + sizeof(constant::seperator) - 1, end - seperator - sizeof(constant::seperator) + 1); + + (*_stats)[key] = value; + //cout << "key " << key << " " << "value " << value << endl; + pos = end + sizeof(constant::end) - 1; + //cout << "pos: " << pos << endl; + } + } + + const string& getHost() const { return _host; } + + ~Stats() { + 
if (_stats != NULL) { + delete _stats; + } + if (_old_stats != NULL) { + delete _old_stats; + } + } + +private: + map *_stats; + map *_old_stats; + map lookup_table; + string _url; + string _host; + double _old_time; + double _now; + double _time_diff; + struct timeval _time; + bool _absolute; +}; diff --git a/cmd/traffic_top/traffic_top.cc b/cmd/traffic_top/traffic_top.cc new file mode 100644 index 00000000..cbf87540 --- /dev/null +++ b/cmd/traffic_top/traffic_top.cc @@ -0,0 +1,465 @@ +/** @file + + Main file for the traffic_top application. + + @section license License + + Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +#include "ink_config.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// At least on solaris, the default ncurses defines macros such as +// clear() that break stdlibc++. 
+#define NOMACROS 1 +#define NCURSES_NOMACROS 1 + +#if defined HAVE_NCURSESW_CURSES_H +# include +#elif defined HAVE_NCURSESW_H +# include +#elif defined HAVE_NCURSES_CURSES_H +# include +#elif defined HAVE_NCURSES_H +# include +#elif defined HAVE_CURSES_H +# include +#else +# error "SysV or X/Open-compatible Curses header file required" +#endif + +#include "stats.h" + +using namespace std; +char curl_error[CURL_ERROR_SIZE]; +string response; + +namespace colorPair { + const short red = 1; + const short yellow = 2; + const short green = 3; + const short blue = 4; + // const short black = 5; + const short grey = 6; + const short cyan = 7; + const short border = 8; +}; + +//---------------------------------------------------------------------------- +static void prettyPrint(const int x, const int y, const double number, const int type) { + char buffer[32]; + char exp = ' '; + double my_number = number; + short color; + if (number > 1000000000000LL) { + my_number = number / 1000000000000LL; + exp = 'T'; + color = colorPair::red; + } else if (number > 1000000000) { + my_number = number / 1000000000; + exp = 'G'; + color = colorPair::red; + } else if (number > 1000000) { + my_number = number / 1000000; + exp = 'M'; + color = colorPair::yellow; + } else if (number > 1000) { + my_number = number / 1000; + exp = 'K'; + color = colorPair::cyan; + } else if (my_number <= .09) { + color = colorPair::grey; + } else { + color = colorPair::green; + } + + if (type == 4 || type == 5) { + if (number > 90) { + color = colorPair::red; + } else if (number > 80) { + color = colorPair::yellow; + } else if (number > 50) { + color = colorPair::blue; + } else if (my_number <= .09) { + color = colorPair::grey; + } else { + color = colorPair::green; + } + snprintf(buffer, sizeof(buffer), "%6.1f%%%%", (double)my_number); + } else + snprintf(buffer, sizeof(buffer), "%6.1f%c", (double)my_number, exp); + attron(COLOR_PAIR(color)); + attron(A_BOLD); + mvprintw(y, x, buffer); + 
attroff(COLOR_PAIR(color)); + attroff(A_BOLD); +} + +//---------------------------------------------------------------------------- +static void makeTable(const int x, const int y, const list &items, Stats &stats) { + int my_y = y; + + for (list::const_iterator it = items.begin(); it != items.end(); ++it) { + string prettyName; + double value = 0; + int type; + + stats.getStat(*it, value, prettyName, type); + mvprintw(my_y, x, prettyName.c_str()); + prettyPrint(x + 10, my_y++, value, type); + } +} + +//---------------------------------------------------------------------------- +size_t write_data(void *ptr, size_t size, size_t nmemb, void * /* stream */) +{ + response.append((char*)ptr, size * nmemb); + //cout << "appending: " << size * nmemb << endl; + //int written = fwrite(ptr, size, nmemb, (FILE *)stream); + return size * nmemb; +} + +//---------------------------------------------------------------------------- +static void response_code_page(Stats &stats) { + attron(COLOR_PAIR(colorPair::border)); + attron(A_BOLD); + mvprintw(0, 0, " RESPONSE CODES "); + attroff(COLOR_PAIR(colorPair::border)); + attroff(A_BOLD); + + list response1; + response1.push_back("100"); + response1.push_back("101"); + response1.push_back("1xx"); + response1.push_back("200"); + response1.push_back("201"); + response1.push_back("202"); + response1.push_back("203"); + response1.push_back("204"); + response1.push_back("205"); + response1.push_back("206"); + response1.push_back("2xx"); + response1.push_back("300"); + response1.push_back("301"); + response1.push_back("302"); + response1.push_back("303"); + response1.push_back("304"); + response1.push_back("305"); + response1.push_back("307"); + response1.push_back("3xx"); + makeTable(0, 1, response1, stats); + + list response2; + response2.push_back("400"); + response2.push_back("401"); + response2.push_back("402"); + response2.push_back("403"); + response2.push_back("404"); + response2.push_back("405"); + response2.push_back("406"); + 
response2.push_back("407"); + response2.push_back("408"); + response2.push_back("409"); + response2.push_back("410"); + response2.push_back("411"); + response2.push_back("412"); + response2.push_back("413"); + response2.push_back("414"); + response2.push_back("415"); + response2.push_back("416"); + response2.push_back("4xx"); + makeTable(21, 1, response2, stats); + + list response3; + response3.push_back("500"); + response3.push_back("501"); + response3.push_back("502"); + response3.push_back("503"); + response3.push_back("504"); + response3.push_back("505"); + response3.push_back("5xx"); + makeTable(42, 1, response3, stats); +} + +//---------------------------------------------------------------------------- +static void help(const string &host, const string &version) { + timeout(1000); + + while(1) { + clear(); + time_t now = time(NULL); + struct tm *nowtm = localtime(&now); + char timeBuf[32]; + strftime(timeBuf, sizeof(timeBuf), "%H:%M:%S", nowtm); + + //clear(); + attron(A_BOLD); mvprintw(0, 0, "Overview:"); attroff(A_BOLD); + mvprintw(1, 0, + "traffic_top is a top like program for Apache Traffic Server (ATS). " + "There is a lot of statistical information gathered by ATS. " + "This program tries to show some of the more important stats and gives a good overview of what the proxy server is doing. 
" + "Hopefully this can be used as a tool for diagnosing the proxy server if there are problems."); + + attron(A_BOLD); mvprintw(7, 0, "Definitions:"); attroff(A_BOLD); + mvprintw(8, 0, "Fresh => Requests that were servered by fresh entries in cache"); + mvprintw(9, 0, "Revalidate => Requests that contacted the origin to verify if still valid"); + mvprintw(10, 0, "Cold => Requests that were not in cache at all"); + mvprintw(11, 0, "Changed => Requests that required entries in cache to be updated"); + mvprintw(12, 0, "Changed => Requests that can't be cached for some reason"); + mvprintw(12, 0, "No Cache => Requests that the client sent Cache-Control: no-cache header"); + + attron(COLOR_PAIR(colorPair::border)); + attron(A_BOLD); + mvprintw(23, 0, "%s - %.12s - %.12s (b)ack ", timeBuf, version.c_str(), host.c_str()); + attroff(COLOR_PAIR(colorPair::border)); + attroff(A_BOLD); + refresh(); + int x = getch(); + if (x == 'b') + break; + } +} + +static void usage() +{ + fprintf(stderr, "Usage: traffic_top [-s seconds] [URL|hostname|hostname:port]\n"); + exit(1); +} + +//---------------------------------------------------------------------------- +void main_stats_page(Stats &stats) +{ + attron(COLOR_PAIR(colorPair::border)); + attron(A_BOLD); + mvprintw(0, 0, " CACHE INFORMATION "); + mvprintw(0, 40, " CLIENT REQUEST & RESPONSE "); + mvprintw(16, 0, " CLIENT "); + mvprintw(16, 40, " ORIGIN SERVER "); + + for (int i = 0; i <= 22; ++i) { + mvprintw(i, 39, " "); + } + attroff(COLOR_PAIR(colorPair::border)); + attroff(A_BOLD); + + list cache1; + cache1.push_back("disk_used"); + cache1.push_back("disk_total"); + cache1.push_back("ram_used"); + cache1.push_back("ram_total"); + cache1.push_back("lookups"); + cache1.push_back("cache_writes"); + cache1.push_back("cache_updates"); + cache1.push_back("cache_deletes"); + cache1.push_back("read_active"); + cache1.push_back("write_active"); + cache1.push_back("update_active"); + cache1.push_back("entries"); + 
cache1.push_back("avg_size"); + cache1.push_back("dns_lookups"); + cache1.push_back("dns_hits"); + makeTable(0, 1, cache1, stats); + + list cache2; + cache2.push_back("ram_ratio"); + cache2.push_back("fresh"); + cache2.push_back("reval"); + cache2.push_back("cold"); + cache2.push_back("changed"); + cache2.push_back("not"); + cache2.push_back("no"); + cache2.push_back("fresh_time"); + cache2.push_back("reval_time"); + cache2.push_back("cold_time"); + cache2.push_back("changed_time"); + cache2.push_back("not_time"); + cache2.push_back("no_time"); + cache2.push_back("dns_ratio"); + cache2.push_back("dns_time"); + makeTable(21, 1, cache2, stats); + + list response1; + response1.push_back("get"); + response1.push_back("head"); + response1.push_back("post"); + response1.push_back("2xx"); + response1.push_back("3xx"); + response1.push_back("4xx"); + response1.push_back("5xx"); + response1.push_back("conn_fail"); + response1.push_back("other_err"); + response1.push_back("abort"); + makeTable(41, 1, response1, stats); + + list response2; + response2.push_back("200"); + response2.push_back("206"); + response2.push_back("301"); + response2.push_back("302"); + response2.push_back("304"); + response2.push_back("404"); + response2.push_back("502"); + response2.push_back("s_100"); + response2.push_back("s_1k"); + response2.push_back("s_3k"); + response2.push_back("s_5k"); + response2.push_back("s_10k"); + response2.push_back("s_1m"); + response2.push_back("s_>1m"); + makeTable(62, 1, response2, stats); + + list client1; + client1.push_back("client_req"); + client1.push_back("client_req_conn"); + client1.push_back("client_conn"); + client1.push_back("client_curr_conn"); + client1.push_back("client_actv_conn"); + makeTable(0, 17, client1, stats); + + list client2; + client2.push_back("client_head"); + client2.push_back("client_body"); + client2.push_back("client_avg_size"); + client2.push_back("client_net"); + client2.push_back("client_req_time"); + makeTable(21, 17, client2, 
stats); + + list server1; + server1.push_back("server_req"); + server1.push_back("server_req_conn"); + server1.push_back("server_conn"); + server1.push_back("server_curr_conn"); + makeTable(41, 17, server1, stats); + + list server2; + server2.push_back("server_head"); + server2.push_back("server_body"); + server2.push_back("server_avg_size"); + server2.push_back("server_net"); + makeTable(62, 17, server2, stats); +} + +//---------------------------------------------------------------------------- +int main(int argc, char **argv) +{ + int sleep_time = 5000; + bool absolute = false; + int opt; + while ((opt = getopt(argc, argv, "s:")) != -1) { + switch(opt) { + case 's': + sleep_time = atoi(optarg) * 1000; + break; + default: + usage(); + } + } + + string url = ""; + if (optind >= argc) { + if (TS_ERR_OKAY != TSInit(NULL, static_cast(TS_MGMT_OPT_NO_EVENTS | TS_MGMT_OPT_NO_SOCK_TESTS))) { + fprintf(stderr, "Error: missing URL on command line or error connecting to the local manager\n"); + usage(); + } + } else { + url = argv[optind]; + } + Stats stats(url); + stats.getStats(); + const string &host = stats.getHost(); + + initscr(); + curs_set(0); + + start_color(); /* Start color functionality */ + + init_pair(colorPair::red, COLOR_RED, COLOR_BLACK); + init_pair(colorPair::yellow, COLOR_YELLOW, COLOR_BLACK); + init_pair(colorPair::grey, COLOR_BLACK, COLOR_BLACK); + init_pair(colorPair::green, COLOR_GREEN, COLOR_BLACK); + init_pair(colorPair::blue, COLOR_BLUE, COLOR_BLACK); + init_pair(colorPair::cyan, COLOR_CYAN, COLOR_BLACK); + init_pair(colorPair::border, COLOR_WHITE, COLOR_BLUE); + // mvchgat(0, 0, -1, A_BLINK, 1, NULL); + + + enum Page {MAIN_PAGE, RESPONSE_PAGE}; + Page page = MAIN_PAGE; + string page_alt = "(r)esponse"; + + while(1) { + attron(COLOR_PAIR(colorPair::border)); + attron(A_BOLD); + + string version; + time_t now = time(NULL); + struct tm *nowtm = localtime(&now); + char timeBuf[32]; + strftime(timeBuf, sizeof(timeBuf), "%H:%M:%S", nowtm); + 
stats.getStat("version", version); + + mvprintw(23, 0, "%-20.20s %30s (q)uit (h)elp (%c)bsolute ", host.c_str(), page_alt.c_str(), absolute ? 'A' : 'a'); + attroff(COLOR_PAIR(colorPair::border)); + attroff(A_BOLD); + + + if (page == MAIN_PAGE) { + main_stats_page(stats); + } else if (page == RESPONSE_PAGE) { + response_code_page(stats); + } + + curs_set(0); + refresh(); + timeout(sleep_time); + + int x = getch(); + switch (x) { + case 'h': + help(host, version); + break; + case 'q': + goto quit; + case 'm': + page = MAIN_PAGE; + page_alt = "(r)esponse"; + break; + case 'r': + page = RESPONSE_PAGE; + page_alt = "(m)ain"; + break; + case 'a': + absolute = stats.toggleAbsolute(); + } + stats.getStats(); + clear(); + } + +quit: + endwin(); + + return 0; +} diff --git a/config.layout b/config.layout new file mode 100644 index 00000000..678e6779 --- /dev/null +++ b/config.layout @@ -0,0 +1,279 @@ +## +## config.layout -- Pre-defined Installation Path Layouts +## +## Licensed to the Apache Software Foundation (ASF) under one +## or more contributor license agreements. See the NOTICE file +## distributed with this work for additional information +## regarding copyright ownership. The ASF licenses this file +## to you under the Apache License, Version 2.0 (the +## "License"); you may not use this file except in compliance +## with the License. You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. 
+## +## Hints: +## - layouts can be loaded with configure's --enable-layout=ID option +## - when no --enable-layout option is given, the default layout is +## `Trafficserver' +## - a trailing plus character (`+') on paths is replaced with a +## `/' suffix where is currently hardcoded to +## 'trafficserver'. +## (This may become a configurable parameter at some point.) +## + +# Classical TrafficServer path layout. + + prefix: /usr/local + exec_prefix: ${prefix} + bindir: ${exec_prefix}/bin + sbindir: ${exec_prefix}/bin + libdir: ${exec_prefix}/lib + libexecdir: ${exec_prefix}/libexec+ + infodir: ${prefix}/info + mandir: ${prefix}/man + sysconfdir: ${prefix}/etc+ + datadir: ${prefix}/share+ + docdir: ${prefix}/share/doc+ + installbuilddir: ${datadir}/build + includedir: ${prefix}/include + localstatedir: ${prefix}/var + runtimedir: ${localstatedir}+ + logdir: ${localstatedir}/log+ + cachedir: ${localstatedir}+ + + +# Classical Apache path layout. + + prefix: /usr/local/trafficserver + exec_prefix: ${prefix} + bindir: ${exec_prefix}/bin + sbindir: ${exec_prefix}/bin + libdir: ${exec_prefix}/lib + libexecdir: ${exec_prefix}/modules + infodir: ${prefix}/info + mandir: ${prefix}/man + sysconfdir: ${prefix}/conf + datadir: ${prefix}/share + docdir: ${prefix}/share/doc+ + installbuilddir: ${datadir}/build + includedir: ${prefix}/include + localstatedir: ${prefix} + runtimedir: ${localstatedir}/logs + logdir: ${localstatedir}/logs + cachedir: ${localstatedir}/cache + + +# GNU standards conforming path layout. +# See FSF's GNU project `make-stds' document for details. 
+ + prefix: /usr/local + exec_prefix: ${prefix} + bindir: ${exec_prefix}/bin + sbindir: ${exec_prefix}/sbin + libdir: ${exec_prefix}/lib + libexecdir: ${exec_prefix}/libexec + infodir: ${prefix}/info + mandir: ${prefix}/man + sysconfdir: ${prefix}/etc+ + datadir: ${prefix}/share+ + docdir: ${prefix}/share/doc+ + installbuilddir: ${datadir}/build + includedir: ${prefix}/include+ + localstatedir: ${prefix}/var+ + runtimedir: ${localstatedir}/run + logdir: ${localstatedir}/log + cachedir: ${localstatedir}/cache + + +# Mac OS X Server (Rhapsody) + + prefix: /Local/Library/TrafficServer + exec_prefix: /usr + bindir: ${exec_prefix}/bin + sbindir: ${exec_prefix}/sbin + libdir: ${exec_prefix}/lib + libexecdir: /System/Library/TrafficServer/Modules + infodir: ${exec_prefix}/share/info + mandir: ${exec_prefix}/share/man + sysconfdir: ${prefix}/Configuration + datadir: ${prefix}/Share + docdir: ${prefix}/Share/doc+ + installbuilddir: /System/Library/TrafficServer/Build + includedir: /System/Library/Frameworks/TrafficServer.framework/Versions/2.0/Headers + localstatedir: /var + runtimedir: ${prefix}/Logs + logdir: ${prefix}/Logs + cachedir: ${prefix}/TrafficServerCache + + +# Darwin/Mac OS Layout + + prefix: /usr + exec_prefix: ${prefix} + bindir: ${exec_prefix}/bin + sbindir: ${exec_prefix}/sbin + libdir: ${exec_prefix}/lib + libexecdir: ${exec_prefix}/libexec+ + infodir: ${prefix}/share/info + mandir: ${prefix}/share/man + datadir: /Library/TrafficServer + docdir: ${prefix}/share/doc+ + sysconfdir: /etc+ + installbuilddir: ${prefix}/share/trafficserver/build + includedir: ${prefix}/include+ + localstatedir: /var+ + runtimedir: ${localstatedir}/run + logdir: ${localstatedir}/log + cachedir: ${localstatedir}/cache + + +# According to the /opt filesystem conventions + + prefix: /opt/trafficserver + exec_prefix: ${prefix} + bindir: ${exec_prefix}/bin + sbindir: ${exec_prefix}/sbin + libdir: ${exec_prefix}/lib + libexecdir: ${exec_prefix}/libexec + infodir: ${prefix}/info + 
mandir: ${prefix}/man + sysconfdir: /etc${prefix} + datadir: ${prefix}/share + docdir: ${prefix}/share/doc+ + installbuilddir: ${datadir}/build + includedir: ${prefix}/include + localstatedir: /var${prefix} + runtimedir: ${localstatedir}/run + logdir: ${localstatedir}/logs + cachedir: ${localstatedir}/cache + + + +# Solaris 8 Layout + + prefix: /usr/trafficserver + exec_prefix: ${prefix} + bindir: ${exec_prefix}/bin + sbindir: ${exec_prefix}/bin + libdir: ${exec_prefix}/lib + libexecdir: ${exec_prefix}/libexec + infodir: ${prefix}/info + mandir: ${prefix}/man + sysconfdir: /etc+ + datadir: /var+ + docdir: ${prefix}/share/doc+ + installbuilddir: ${datadir}/build + includedir: ${prefix}/include + localstatedir: ${prefix} + runtimedir: /var/run+ + logdir: ${datadir}/logs + cachedir: ${datadir}/cache + + + +# Debian layout + + prefix: /usr + exec_prefix: ${prefix} + bindir: ${exec_prefix}/bin + sbindir: ${exec_prefix}/sbin + libdir: ${exec_prefix}/lib+ + libexecdir: ${exec_prefix}/lib/trafficserver/modules + infodir: ${prefix}/share/info + mandir: ${prefix}/share/man + sysconfdir: /etc+ + datadir: /var/cache+ + docdir: ${prefix}/share/doc+ + installbuilddir: ${prefix}/share/trafficserver/build + includedir: ${prefix}/include + localstatedir: /var/run + runtimedir: /var/run+ + logdir: /var/log+ + cachedir: /var/cache+ + + +# Gentoo Linux TrafficServer path layout. + + prefix: /usr + exec_prefix: ${prefix} + bindir: ${exec_prefix}/bin + sbindir: ${exec_prefix}/sbin + libdir: ${exec_prefix}/lib+ + libexecdir: ${libdir}/plugins + infodir: ${prefix}/share/info + mandir: ${prefix}/share/man + sysconfdir: /etc+ + datadir: ${prefix}/share+ + docdir: ${prefix}/share/doc+ + installbuilddir: ${datadir}/build + includedir: ${prefix}/include+ + localstatedir: /var+ + runtimedir: /run+ + logdir: /var/log+ + cachedir: /var/cache+ + + +# OpenBSD path layout. 
+ + prefix: /usr/local + exec_prefix: ${prefix} + bindir: ${exec_prefix}/bin + sbindir: ${exec_prefix}/bin + libdir: ${exec_prefix}/lib + libexecdir: ${exec_prefix}/libexec+ + infodir: ${prefix}/info + mandir: ${prefix}/man + sysconfdir: /etc+ + datadir: ${prefix}/share+ + docdir: ${prefix}/share/doc+ + installbuilddir: ${datadir}/build + includedir: ${prefix}/include + localstatedir: /var+ + runtimedir: /var/run+ + logdir: /var/log+ + cachedir: ${localstatedir}/cache + + + + exec_prefix: ${prefix} + bindir: ${exec_prefix}/bin64 + sbindir: ${exec_prefix}/sbin + libdir: ${exec_prefix}/lib + libexecdir: ${exec_prefix}/libexec64+ + infodir: ${prefix}/info + mandir: ${prefix}/man + sysconfdir: ${prefix}/conf+ + datadir: ${prefix}/share+ + installbuilddir: ${libdir}/trafficserver/build + includedir: ${prefix}/include+ + localstatedir: ${prefix}/var+ + runtimedir: ${prefix}/var/run+ + logdir: ${prefix}/logs+ + cachedir: ${datadir} + + + + prefix: /usr + exec_prefix: ${prefix} + bindir: ${exec_prefix}/bin + sbindir: ${exec_prefix}/sbin + libdir: ${exec_prefix}/lib + libexecdir: ${libdir}+ + infodir: ${prefix}/share/info + mandir: ${prefix}/share/man + sysconfdir: /etc+ + datadir: ${prefix}/share+ + docdir: ${prefix}/share/doc+ + installbuilddir: ${datadir}/build + includedir: ${prefix}/include + localstatedir: /run + runtimedir: /run+ + logdir: /var/log+ + cachedir: /var/lib+ + diff --git a/configure b/configure new file mode 100755 index 00000000..3fd29fb1 --- /dev/null +++ b/configure @@ -0,0 +1,30876 @@ +#! /bin/sh +# Guess values for system-dependent variables and create Makefiles. +# Generated by GNU Autoconf 2.69 for Apache Traffic Server 5.0.1. +# +# Report bugs to . +# +# +# Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc. +# +# +# This configure script is free software; the Free Software Foundation +# gives unlimited permission to copy, distribute and modify it. +## -------------------- ## +## M4sh Initialization. 
## +## -------------------- ## + +# Be more Bourne compatible +DUALCASE=1; export DUALCASE # for MKS sh +if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : + emulate sh + NULLCMD=: + # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. + alias -g '${1+"$@"}'='"$@"' + setopt NO_GLOB_SUBST +else + case `(set -o) 2>/dev/null` in #( + *posix*) : + set -o posix ;; #( + *) : + ;; +esac +fi + + +as_nl=' +' +export as_nl +# Printing a long string crashes Solaris 7 /usr/bin/printf. +as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo +# Prefer a ksh shell builtin over an external printf program on Solaris, +# but without wasting forks for bash or zsh. +if test -z "$BASH_VERSION$ZSH_VERSION" \ + && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then + as_echo='print -r --' + as_echo_n='print -rn --' +elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then + as_echo='printf %s\n' + as_echo_n='printf %s' +else + if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then + as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' + as_echo_n='/usr/ucb/echo -n' + else + as_echo_body='eval expr "X$1" : "X\\(.*\\)"' + as_echo_n_body='eval + arg=$1; + case $arg in #( + *"$as_nl"*) + expr "X$arg" : "X\\(.*\\)$as_nl"; + arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; + esac; + expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" + ' + export as_echo_n_body + as_echo_n='sh -c $as_echo_n_body as_echo' + fi + export as_echo_body + as_echo='sh -c $as_echo_body as_echo' +fi + +# The user is always right. 
+if test "${PATH_SEPARATOR+set}" != set; then + PATH_SEPARATOR=: + (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { + (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || + PATH_SEPARATOR=';' + } +fi + + +# IFS +# We need space, tab and new line, in precisely that order. Quoting is +# there to prevent editors from complaining about space-tab. +# (If _AS_PATH_WALK were called with IFS unset, it would disable word +# splitting by setting IFS to empty value.) +IFS=" "" $as_nl" + +# Find who we are. Look in the path if we contain no directory separator. +as_myself= +case $0 in #(( + *[\\/]* ) as_myself=$0 ;; + *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break + done +IFS=$as_save_IFS + + ;; +esac +# We did not find ourselves, most probably we were run as `sh COMMAND' +# in which case we are not to be found in the path. +if test "x$as_myself" = x; then + as_myself=$0 +fi +if test ! -f "$as_myself"; then + $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 + exit 1 +fi + +# Unset variables that we do not need and which cause bugs (e.g. in +# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" +# suppresses any "Segmentation fault" message there. '((' could +# trigger a bug in pdksh 5.2.14. +for as_var in BASH_ENV ENV MAIL MAILPATH +do eval test x\${$as_var+set} = xset \ + && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : +done +PS1='$ ' +PS2='> ' +PS4='+ ' + +# NLS nuisances. +LC_ALL=C +export LC_ALL +LANGUAGE=C +export LANGUAGE + +# CDPATH. +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + +# Use a proper internal environment variable to ensure we don't fall + # into an infinite loop, continuously re-executing ourselves. 
+ if test x"${_as_can_reexec}" != xno && test "x$CONFIG_SHELL" != x; then + _as_can_reexec=no; export _as_can_reexec; + # We cannot yet assume a decent shell, so we have to provide a +# neutralization value for shells without unset; and this also +# works around shells that cannot unset nonexistent variables. +# Preserve -v and -x to the replacement shell. +BASH_ENV=/dev/null +ENV=/dev/null +(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV +case $- in # (((( + *v*x* | *x*v* ) as_opts=-vx ;; + *v* ) as_opts=-v ;; + *x* ) as_opts=-x ;; + * ) as_opts= ;; +esac +exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} +# Admittedly, this is quite paranoid, since all the known shells bail +# out after a failed `exec'. +$as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 +as_fn_exit 255 + fi + # We don't want this to propagate to other subprocesses. + { _as_can_reexec=; unset _as_can_reexec;} +if test "x$CONFIG_SHELL" = x; then + as_bourne_compatible="if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then : + emulate sh + NULLCMD=: + # Pre-4.2 versions of Zsh do word splitting on \${1+\"\$@\"}, which + # is contrary to our usage. Disable this feature. + alias -g '\${1+\"\$@\"}'='\"\$@\"' + setopt NO_GLOB_SUBST +else + case \`(set -o) 2>/dev/null\` in #( + *posix*) : + set -o posix ;; #( + *) : + ;; +esac +fi +" + as_required="as_fn_return () { (exit \$1); } +as_fn_success () { as_fn_return 0; } +as_fn_failure () { as_fn_return 1; } +as_fn_ret_success () { return 0; } +as_fn_ret_failure () { return 1; } + +exitcode=0 +as_fn_success || { exitcode=1; echo as_fn_success failed.; } +as_fn_failure && { exitcode=1; echo as_fn_failure succeeded.; } +as_fn_ret_success || { exitcode=1; echo as_fn_ret_success failed.; } +as_fn_ret_failure && { exitcode=1; echo as_fn_ret_failure succeeded.; } +if ( set x; as_fn_ret_success y && test x = \"\$1\" ); then : + +else + exitcode=1; echo positional parameters were not saved. 
+fi +test x\$exitcode = x0 || exit 1 +test -x / || exit 1" + as_suggested=" as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO + as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO + eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" && + test \"x\`expr \$as_lineno_1'\$as_run' + 1\`\" = \"x\$as_lineno_2'\$as_run'\"' || exit 1 +test \$(( 1 + 1 )) = 2 || exit 1 + + test -n \"\${ZSH_VERSION+set}\${BASH_VERSION+set}\" || ( + ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' + ECHO=\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO + ECHO=\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO + PATH=/empty FPATH=/empty; export PATH FPATH + test \"X\`printf %s \$ECHO\`\" = \"X\$ECHO\" \\ + || test \"X\`print -r -- \$ECHO\`\" = \"X\$ECHO\" ) || exit 1" + if (eval "$as_required") 2>/dev/null; then : + as_have_required=yes +else + as_have_required=no +fi + if test x$as_have_required = xyes && (eval "$as_suggested") 2>/dev/null; then : + +else + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +as_found=false +for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + as_found=: + case $as_dir in #( + /*) + for as_base in sh bash ksh sh5; do + # Try only shells that exist, to save several forks. 
+ as_shell=$as_dir/$as_base + if { test -f "$as_shell" || test -f "$as_shell.exe"; } && + { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$as_shell"; } 2>/dev/null; then : + CONFIG_SHELL=$as_shell as_have_required=yes + if { $as_echo "$as_bourne_compatible""$as_suggested" | as_run=a "$as_shell"; } 2>/dev/null; then : + break 2 +fi +fi + done;; + esac + as_found=false +done +$as_found || { if { test -f "$SHELL" || test -f "$SHELL.exe"; } && + { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$SHELL"; } 2>/dev/null; then : + CONFIG_SHELL=$SHELL as_have_required=yes +fi; } +IFS=$as_save_IFS + + + if test "x$CONFIG_SHELL" != x; then : + export CONFIG_SHELL + # We cannot yet assume a decent shell, so we have to provide a +# neutralization value for shells without unset; and this also +# works around shells that cannot unset nonexistent variables. +# Preserve -v and -x to the replacement shell. +BASH_ENV=/dev/null +ENV=/dev/null +(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV +case $- in # (((( + *v*x* | *x*v* ) as_opts=-vx ;; + *v* ) as_opts=-v ;; + *x* ) as_opts=-x ;; + * ) as_opts= ;; +esac +exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} +# Admittedly, this is quite paranoid, since all the known shells bail +# out after a failed `exec'. +$as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 +exit 255 +fi + + if test x$as_have_required = xno; then : + $as_echo "$0: This script requires a shell more modern than all" + $as_echo "$0: the shells that I found on your system." + if test x${ZSH_VERSION+set} = xset ; then + $as_echo "$0: In particular, zsh $ZSH_VERSION has bugs and should" + $as_echo "$0: be upgraded to zsh 4.3.4 or later." + else + $as_echo "$0: Please tell bug-autoconf@gnu.org and +$0: dev@trafficserver.apache.org about your system, +$0: including any error possibly output before this +$0: message. Then install a modern shell, or manually run +$0: the script under such a shell if you do have one." 
+ fi + exit 1 +fi +fi +fi +SHELL=${CONFIG_SHELL-/bin/sh} +export SHELL +# Unset more variables known to interfere with behavior of common tools. +CLICOLOR_FORCE= GREP_OPTIONS= +unset CLICOLOR_FORCE GREP_OPTIONS + +## --------------------- ## +## M4sh Shell Functions. ## +## --------------------- ## +# as_fn_unset VAR +# --------------- +# Portably unset VAR. +as_fn_unset () +{ + { eval $1=; unset $1;} +} +as_unset=as_fn_unset + +# as_fn_set_status STATUS +# ----------------------- +# Set $? to STATUS, without forking. +as_fn_set_status () +{ + return $1 +} # as_fn_set_status + +# as_fn_exit STATUS +# ----------------- +# Exit the shell with STATUS, even in a "trap 0" or "set -e" context. +as_fn_exit () +{ + set +e + as_fn_set_status $1 + exit $1 +} # as_fn_exit + +# as_fn_mkdir_p +# ------------- +# Create "$as_dir" as a directory, including parents if necessary. +as_fn_mkdir_p () +{ + + case $as_dir in #( + -*) as_dir=./$as_dir;; + esac + test -d "$as_dir" || eval $as_mkdir_p || { + as_dirs= + while :; do + case $as_dir in #( + *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( + *) as_qdir=$as_dir;; + esac + as_dirs="'$as_qdir' $as_dirs" + as_dir=`$as_dirname -- "$as_dir" || +$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_dir" : 'X\(//\)[^/]' \| \ + X"$as_dir" : 'X\(//\)$' \| \ + X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$as_dir" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + test -d "$as_dir" && break + done + test -z "$as_dirs" || eval "mkdir $as_dirs" + } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" + + +} # as_fn_mkdir_p + +# as_fn_executable_p FILE +# ----------------------- +# Test if FILE is an executable regular file. 
+as_fn_executable_p () +{ + test -f "$1" && test -x "$1" +} # as_fn_executable_p +# as_fn_append VAR VALUE +# ---------------------- +# Append the text in VALUE to the end of the definition contained in VAR. Take +# advantage of any shell optimizations that allow amortized linear growth over +# repeated appends, instead of the typical quadratic growth present in naive +# implementations. +if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : + eval 'as_fn_append () + { + eval $1+=\$2 + }' +else + as_fn_append () + { + eval $1=\$$1\$2 + } +fi # as_fn_append + +# as_fn_arith ARG... +# ------------------ +# Perform arithmetic evaluation on the ARGs, and store the result in the +# global $as_val. Take advantage of shells that can avoid forks. The arguments +# must be portable across $(()) and expr. +if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : + eval 'as_fn_arith () + { + as_val=$(( $* )) + }' +else + as_fn_arith () + { + as_val=`expr "$@" || test $? -eq 1` + } +fi # as_fn_arith + + +# as_fn_error STATUS ERROR [LINENO LOG_FD] +# ---------------------------------------- +# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are +# provided, also output the error to LOG_FD, referencing LINENO. Then exit the +# script with STATUS, using 1 if that was 0. 
+as_fn_error () +{ + as_status=$1; test $as_status -eq 0 && as_status=1 + if test "$4"; then + as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 + fi + $as_echo "$as_me: error: $2" >&2 + as_fn_exit $as_status +} # as_fn_error + +if expr a : '\(a\)' >/dev/null 2>&1 && + test "X`expr 00001 : '.*\(...\)'`" = X001; then + as_expr=expr +else + as_expr=false +fi + +if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then + as_basename=basename +else + as_basename=false +fi + +if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then + as_dirname=dirname +else + as_dirname=false +fi + +as_me=`$as_basename -- "$0" || +$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ + X"$0" : 'X\(//\)$' \| \ + X"$0" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X/"$0" | + sed '/^.*\/\([^/][^/]*\)\/*$/{ + s//\1/ + q + } + /^X\/\(\/\/\)$/{ + s//\1/ + q + } + /^X\/\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + +# Avoid depending upon Character Ranges. +as_cr_letters='abcdefghijklmnopqrstuvwxyz' +as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' +as_cr_Letters=$as_cr_letters$as_cr_LETTERS +as_cr_digits='0123456789' +as_cr_alnum=$as_cr_Letters$as_cr_digits + + + as_lineno_1=$LINENO as_lineno_1a=$LINENO + as_lineno_2=$LINENO as_lineno_2a=$LINENO + eval 'test "x$as_lineno_1'$as_run'" != "x$as_lineno_2'$as_run'" && + test "x`expr $as_lineno_1'$as_run' + 1`" = "x$as_lineno_2'$as_run'"' || { + # Blame Lee E. McMahon (1931-1989) for sed's syntax. 
:-) + sed -n ' + p + /[$]LINENO/= + ' <$as_myself | + sed ' + s/[$]LINENO.*/&-/ + t lineno + b + :lineno + N + :loop + s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/ + t loop + s/-\n.*// + ' >$as_me.lineno && + chmod +x "$as_me.lineno" || + { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; } + + # If we had to re-execute with $CONFIG_SHELL, we're ensured to have + # already done that, so ensure we don't try to do so again and fall + # in an infinite loop. This has already happened in practice. + _as_can_reexec=no; export _as_can_reexec + # Don't try to exec as it changes $[0], causing all sort of problems + # (the dirname of $[0] is not the place where we might find the + # original and so on. Autoconf is especially sensitive to this). + . "./$as_me.lineno" + # Exit status is that of the last command. + exit +} + +ECHO_C= ECHO_N= ECHO_T= +case `echo -n x` in #((((( +-n*) + case `echo 'xy\c'` in + *c*) ECHO_T=' ';; # ECHO_T is single tab character. + xy) ECHO_C='\c';; + *) echo `echo ksh88 bug on AIX 6.1` > /dev/null + ECHO_T=' ';; + esac;; +*) + ECHO_N='-n';; +esac + +rm -f conf$$ conf$$.exe conf$$.file +if test -d conf$$.dir; then + rm -f conf$$.dir/conf$$.file +else + rm -f conf$$.dir + mkdir conf$$.dir 2>/dev/null +fi +if (echo >conf$$.file) 2>/dev/null; then + if ln -s conf$$.file conf$$ 2>/dev/null; then + as_ln_s='ln -s' + # ... but there are two gotchas: + # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. + # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. + # In both cases, we have to default to `cp -pR'. + ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || + as_ln_s='cp -pR' + elif ln conf$$.file conf$$ 2>/dev/null; then + as_ln_s=ln + else + as_ln_s='cp -pR' + fi +else + as_ln_s='cp -pR' +fi +rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file +rmdir conf$$.dir 2>/dev/null + +if mkdir -p . 
2>/dev/null; then + as_mkdir_p='mkdir -p "$as_dir"' +else + test -d ./-p && rmdir ./-p + as_mkdir_p=false +fi + +as_test_x='test -x' +as_executable_p=as_fn_executable_p + +# Sed expression to map a string onto a valid CPP name. +as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" + +# Sed expression to map a string onto a valid variable name. +as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" + +SHELL=${CONFIG_SHELL-/bin/sh} + + +test -n "$DJDIR" || exec 7<&0 &1 + +# Name of the host. +# hostname on some systems (SVR3.2, old GNU/Linux) returns a bogus exit status, +# so uname gets run too. +ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q` + +# +# Initializations. +# +ac_default_prefix=/usr/local +ac_clean_files= +ac_config_libobj_dir=. +LIBOBJS= +cross_compiling=no +subdirs= +MFLAGS= +MAKEFLAGS= + +# Identity of this package. +PACKAGE_NAME='Apache Traffic Server' +PACKAGE_TARNAME='trafficserver' +PACKAGE_VERSION='5.0.1' +PACKAGE_STRING='Apache Traffic Server 5.0.1' +PACKAGE_BUGREPORT='dev@trafficserver.apache.org' +PACKAGE_URL='http://trafficserver.apache.org' + +ac_unique_file="proxy/Main.cc" +ac_default_prefix=/usr/local/trafficserver +# Factoring default headers for most tests. 
+ac_includes_default="\ +#include +#ifdef HAVE_SYS_TYPES_H +# include +#endif +#ifdef HAVE_SYS_STAT_H +# include +#endif +#ifdef STDC_HEADERS +# include +# include +#else +# ifdef HAVE_STDLIB_H +# include +# endif +#endif +#ifdef HAVE_STRING_H +# if !defined STDC_HEADERS && defined HAVE_MEMORY_H +# include +# endif +# include +#endif +#ifdef HAVE_STRINGS_H +# include +#endif +#ifdef HAVE_INTTYPES_H +# include +#endif +#ifdef HAVE_STDINT_H +# include +#endif +#ifdef HAVE_UNISTD_H +# include +#endif" + +ac_subst_vars='am__EXEEXT_FALSE +am__EXEEXT_TRUE +LTLIBOBJS +LIBOBJS +TFW_PACKAGE_SUFFIX +SHARED_LDFLAGS +SHARED_CXXLINKFLAGS +SHARED_CXXFLAGS +SHARED_CFLAGS +MGMT_DEFS +iocore_include_dirs +LIBTOOL_LINK_FLAGS +EXTRA_CXX_LDFLAGS +EXTRA_CC_LDFLAGS +EXPAT_LDFLAGS +API_DEFS +BUILD_COLLAPSED_CONNECTION_PLUGIN_FALSE +BUILD_COLLAPSED_CONNECTION_PLUGIN_TRUE +BUILD_REMAP_STATS_PLUGIN_FALSE +BUILD_REMAP_STATS_PLUGIN_TRUE +has_in6_is_addr_unspecified +gethostbyname_r_glibc2 +default_loopback_iface +has_ip_tos +has_so_mark +ip_transparent +use_tproxy +netinet_ip_icmph +netinet_iph +readline_readlineh +ifaddrsh +net_ppp_defsh +mathh +sys_sysmacrosh +sys_mounth +stroptsh +cpioh +allocah +valuesh +libgenh +floath +waith +malloch +siginfoh +ctypeh +netdbh +arpa_nameser_compath +arpa_nameserh +arpa_ineth +sys_systeminfoh +sys_sysinfoh +sys_sysctlh +sys_sockioh +sys_byteorderh +sys_ioctlh +netinet_tcph +netinet_in_systmh +netinet_inh +schedh +sys_pseth +sys_cpuseth +sys_paramh +pthread_nph +endianh +machine_endianh +sys_eventh +sys_epollh +SPDYLAY_LIBS +SPDYLAY_CFLAGS +BUILD_HTTP_LOAD_FALSE +BUILD_HTTP_LOAD_TRUE +LUA_LDFLAGS +TS_LUAJIT_LDFLAGS +LIBREADLINE +has_tcmalloc +jemalloch +BUILD_HEALTHCHECK_PLUGIN_FALSE +BUILD_HEALTHCHECK_PLUGIN_TRUE +BUILD_GEOIP_PLUGIN_FALSE +BUILD_GEOIP_PLUGIN_TRUE +GEOIP_LIBS +use_hwloc +HWLOC_LIBS +HWLOC_CFLAGS +PKG_CONFIG_LIBDIR +PKG_CONFIG_PATH +use_linux_native_aio +use_posix_cap +LIBCAP +has_128bit_cas +LIBDEMANGLE +has_profiler +LIBPROFILER +use_port 
+use_kqueue +use_epoll +has_backtrace +execinfoh +pcre_pcreh +pcreh +LIBPCRE +PCRE_CONFIG +expath +LIBEXPAT +LIBTCL +TCL_STUB_LIB_SPEC +TCL_STUB_LIB_FLAG +TCL_STUB_LIB_FILE +TCL_LIB_SPEC +TCL_LIB_FLAG +TCL_LIB_FILE +TCL_SRC_DIR +TCL_BIN_DIR +TCL_PATCH_LEVEL +TCL_VERSION +lzmah +LIBLZMA +zlibh +LIBZ +use_tls_sni +use_tls_eckey +use_tls_alpn +use_tls_npn +OPENSSL_LDFLAGS +OPENSSL_LIBS +OPENSSL_INCLUDES +PKG_CONFIG +LIBCRYPT +BOOST_LDFLAGS +BOOST_CPPFLAGS +BUILD_HAVE_BOOST_FALSE +BUILD_HAVE_BOOST_TRUE +BUILD_TSTOP_FALSE +BUILD_TSTOP_TRUE +CURL_LIBS +CURL_CFLAGS +CURL_CONFIG +CURSES_LIB +LIBRESOLV +CCACHE +ac_cv_sizeof_voidp +FLEX_CFLAGS +AM_CXXFLAGS +AM_CFLAGS +BUILD_HAVE_LIBCXX_FALSE +BUILD_HAVE_LIBCXX_TRUE +BUILD_PERL_LIB_FALSE +BUILD_PERL_LIB_TRUE +PERL +DOXYGEN +LEXLIB +LEX_OUTPUT_ROOT +LEX +YACC +BUILD_MANPAGES_FALSE +BUILD_MANPAGES_TRUE +TS_MAN8_MANPAGES +TS_MAN5_MANPAGES +TS_MAN3_MANPAGES +TS_MAN1_MANPAGES +SPHINXOPTS +SPHINXBUILD +pkgpyexecdir +pyexecdir +pkgpythondir +pythondir +PYTHON_PLATFORM +PYTHON_EXEC_PREFIX +PYTHON_PREFIX +PYTHON_VERSION +PYTHON +ASCPP +RM +OTOOL64 +OTOOL +LIPO +NMEDIT +DSYMUTIL +MANIFEST_TOOL +RANLIB +DLLTOOL +OBJDUMP +NM +ac_ct_DUMPBIN +DUMPBIN +LD +FGREP +LIBTOOL +LN_S +SED +ac_ct_AR +AR +defer_accept +EGREP +GREP +BUILD_HAVE_CXX_11_FALSE +BUILD_HAVE_CXX_11_TRUE +am__fastdepCCAS_FALSE +am__fastdepCCAS_TRUE +CCASDEPMODE +CCASFLAGS +CCAS +CXXCPP +CPP +am__fastdepCXX_FALSE +am__fastdepCXX_TRUE +CXXDEPMODE +ac_ct_CXX +CXXFLAGS +CXX +am__fastdepCC_FALSE +am__fastdepCC_TRUE +CCDEPMODE +am__nodep +AMDEPBACKSLASH +AMDEP_FALSE +AMDEP_TRUE +am__quote +am__include +DEPDIR +OBJEXT +EXEEXT +ac_ct_CC +CPPFLAGS +LDFLAGS +CFLAGS +CC +pkgdocdir +pkgcachedir +pkglogdir +pkgruntimedir +pkglocalstatedir +pkgdatadir +pkgsysconfdir +pkglibexecdir +pkglibdir +pkgsbindir +pkgbindir +rel_docdir +exp_docdir +cachedir +rel_cachedir +exp_cachedir +logdir +rel_logdir +exp_logdir +runtimedir +rel_runtimedir +exp_runtimedir +rel_localstatedir +exp_localstatedir 
+rel_includedir +exp_includedir +installbuilddir +rel_installbuilddir +exp_installbuilddir +rel_datadir +exp_datadir +rel_sysconfdir +exp_sysconfdir +rel_mandir +exp_mandir +rel_infodir +exp_infodir +rel_libexecdir +exp_libexecdir +rel_libdir +exp_libdir +rel_sbindir +exp_sbindir +rel_bindir +exp_bindir +rel_exec_prefix +exp_exec_prefix +rel_prefix +exp_prefix +BUILD_LUAJIT_FALSE +BUILD_LUAJIT_TRUE +ENABLE_CPPAPI_FALSE +ENABLE_CPPAPI_TRUE +BUILD_TEST_TOOLS_FALSE +BUILD_TEST_TOOLS_TRUE +BUILD_EXAMPLE_PLUGINS_FALSE +BUILD_EXAMPLE_PLUGINS_TRUE +BUILD_EXPERIMENTAL_PLUGINS_FALSE +BUILD_EXPERIMENTAL_PLUGINS_TRUE +max_threads_per_type +max_event_threads +max_host_name_len +max_api_stats +BUILD_SPDY_FALSE +BUILD_SPDY_TRUE +has_spdy +use_reclaimable_freelist +use_freelist +BUILD_INTERIM_CACHE_FALSE +BUILD_INTERIM_CACHE_TRUE +has_interim_cache +BUILD_WCCP_FALSE +BUILD_WCCP_TRUE +has_wccp +enable_remote_cov_commit +STATIC_LIBTS_FALSE +STATIC_LIBTS_TRUE +ink_with_modules_process +ink_with_modules_local +BUILD_TESTS_FALSE +BUILD_TESTS_TRUE +has_tests +use_diags +use_fast_sdk +pkgsysgroup +pkgsysuser +build_group +build_person +build_machine +HOST_GUESS +host_os +host_vendor +host_cpu +host +build_os +build_vendor +build_cpu +build +TS_VERSION_MICRO +TS_VERSION_MINOR +TS_VERSION_MAJOR +TS_VERSION_NUMBER +TS_VERSION_STRING +TS_LIBTOOL_VERSION +AM_BACKSLASH +AM_DEFAULT_VERBOSITY +AM_DEFAULT_V +AM_V +am__untar +am__tar +AMTAR +am__leading_dot +SET_MAKE +AWK +mkdir_p +MKDIR_P +INSTALL_STRIP_PROGRAM +STRIP +install_sh +MAKEINFO +AUTOHEADER +AUTOMAKE +AUTOCONF +ACLOCAL +VERSION +PACKAGE +CYGPATH_W +am__isrc +INSTALL_DATA +INSTALL_SCRIPT +INSTALL_PROGRAM +target_alias +host_alias +build_alias +LIBS +ECHO_T +ECHO_N +ECHO_C +DEFS +mandir +localedir +libdir +psdir +pdfdir +dvidir +htmldir +infodir +docdir +oldincludedir +includedir +localstatedir +sharedstatedir +sysconfdir +datadir +datarootdir +libexecdir +sbindir +bindir +program_transform_name +prefix +exec_prefix +PACKAGE_URL 
+PACKAGE_BUGREPORT +PACKAGE_STRING +PACKAGE_VERSION +PACKAGE_TARNAME +PACKAGE_NAME +PATH_SEPARATOR +SHELL' +ac_subst_files='' +ac_user_opts=' +enable_option_checking +enable_silent_rules +enable_static +enable_layout +with_user +with_group +enable_debug +enable_werror +enable_fast_sdk +enable_diags +enable_tests +enable_static_libts +enable_remote_cov_commit +enable_wccp +enable_interim_cache +with_profiler +enable_eventfd +enable_posix_cap +enable_hwloc +enable_ccache +enable_tproxy +enable_freelist +enable_reclaimable_freelist +enable_spdy +with_max_api_stats +with_max_host_name_len +with_max_event_threads +with_max_threads_per_type +enable_experimental_plugins +enable_example_plugins +enable_test_tools +enable_cppapi +enable_luajit +enable_dependency_tracking +enable_shared +with_pic +enable_fast_install +with_gnu_ld +with_sysroot +enable_libtool_lock +with_architecture +with_ncurses +with_ncursesw +with_curl_prefix +with_curl_exec_prefix +with_boost +with_boost_libdir +with_openssl +with_zlib +with_lzma +with_tcl +with_xml +with_expat +with_libxml2 +with_pcre +enable_linux_native_aio +with_jemalloc +with_tcmalloc_lib +with_tcmalloc +' + ac_precious_vars='build_alias +host_alias +target_alias +CC +CFLAGS +LDFLAGS +LIBS +CPPFLAGS +CXX +CXXFLAGS +CCC +CPP +CXXCPP +CCAS +CCASFLAGS +PYTHON +SPHINXBUILD +SPHINXOPTS +DOXYGEN +PERL +CURSES_LIB +CURL_CONFIG +CURL_CFLAGS +CURL_LIBS +PKG_CONFIG +PKG_CONFIG_PATH +PKG_CONFIG_LIBDIR +HWLOC_CFLAGS +HWLOC_LIBS +SPDYLAY_CFLAGS +SPDYLAY_LIBS' + + +# Initialize some variables set by options. +ac_init_help= +ac_init_version=false +ac_unrecognized_opts= +ac_unrecognized_sep= +# The variables have the same names as the options, with +# dashes changed to underlines. +cache_file=/dev/null +exec_prefix=NONE +no_create= +no_recursion= +prefix=NONE +program_prefix=NONE +program_suffix=NONE +program_transform_name=s,x,x, +silent= +site= +srcdir= +verbose= +x_includes=NONE +x_libraries=NONE + +# Installation directory options. 
+# These are left unexpanded so users can "make install exec_prefix=/foo" +# and all the variables that are supposed to be based on exec_prefix +# by default will actually change. +# Use braces instead of parens because sh, perl, etc. also accept them. +# (The list follows the same order as the GNU Coding Standards.) +bindir='${exec_prefix}/bin' +sbindir='${exec_prefix}/sbin' +libexecdir='${exec_prefix}/libexec' +datarootdir='${prefix}/share' +datadir='${datarootdir}' +sysconfdir='${prefix}/etc' +sharedstatedir='${prefix}/com' +localstatedir='${prefix}/var' +includedir='${prefix}/include' +oldincludedir='/usr/include' +docdir='${datarootdir}/doc/${PACKAGE_TARNAME}' +infodir='${datarootdir}/info' +htmldir='${docdir}' +dvidir='${docdir}' +pdfdir='${docdir}' +psdir='${docdir}' +libdir='${exec_prefix}/lib' +localedir='${datarootdir}/locale' +mandir='${datarootdir}/man' + +ac_prev= +ac_dashdash= +for ac_option +do + # If the previous option needs an argument, assign it. + if test -n "$ac_prev"; then + eval $ac_prev=\$ac_option + ac_prev= + continue + fi + + case $ac_option in + *=?*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;; + *=) ac_optarg= ;; + *) ac_optarg=yes ;; + esac + + # Accept the important Cygnus configure options, so we can diagnose typos. 
+ + case $ac_dashdash$ac_option in + --) + ac_dashdash=yes ;; + + -bindir | --bindir | --bindi | --bind | --bin | --bi) + ac_prev=bindir ;; + -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*) + bindir=$ac_optarg ;; + + -build | --build | --buil | --bui | --bu) + ac_prev=build_alias ;; + -build=* | --build=* | --buil=* | --bui=* | --bu=*) + build_alias=$ac_optarg ;; + + -cache-file | --cache-file | --cache-fil | --cache-fi \ + | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c) + ac_prev=cache_file ;; + -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \ + | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*) + cache_file=$ac_optarg ;; + + --config-cache | -C) + cache_file=config.cache ;; + + -datadir | --datadir | --datadi | --datad) + ac_prev=datadir ;; + -datadir=* | --datadir=* | --datadi=* | --datad=*) + datadir=$ac_optarg ;; + + -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \ + | --dataroo | --dataro | --datar) + ac_prev=datarootdir ;; + -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \ + | --dataroot=* | --dataroo=* | --dataro=* | --datar=*) + datarootdir=$ac_optarg ;; + + -disable-* | --disable-*) + ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && + as_fn_error $? 
"invalid feature name: $ac_useropt" + ac_useropt_orig=$ac_useropt + ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" +"enable_$ac_useropt" +"*) ;; + *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--disable-$ac_useropt_orig" + ac_unrecognized_sep=', ';; + esac + eval enable_$ac_useropt=no ;; + + -docdir | --docdir | --docdi | --doc | --do) + ac_prev=docdir ;; + -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*) + docdir=$ac_optarg ;; + + -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv) + ac_prev=dvidir ;; + -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*) + dvidir=$ac_optarg ;; + + -enable-* | --enable-*) + ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && + as_fn_error $? "invalid feature name: $ac_useropt" + ac_useropt_orig=$ac_useropt + ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" +"enable_$ac_useropt" +"*) ;; + *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--enable-$ac_useropt_orig" + ac_unrecognized_sep=', ';; + esac + eval enable_$ac_useropt=\$ac_optarg ;; + + -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \ + | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \ + | --exec | --exe | --ex) + ac_prev=exec_prefix ;; + -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \ + | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \ + | --exec=* | --exe=* | --ex=*) + exec_prefix=$ac_optarg ;; + + -gas | --gas | --ga | --g) + # Obsolete; use --with-gas. 
+ with_gas=yes ;; + + -help | --help | --hel | --he | -h) + ac_init_help=long ;; + -help=r* | --help=r* | --hel=r* | --he=r* | -hr*) + ac_init_help=recursive ;; + -help=s* | --help=s* | --hel=s* | --he=s* | -hs*) + ac_init_help=short ;; + + -host | --host | --hos | --ho) + ac_prev=host_alias ;; + -host=* | --host=* | --hos=* | --ho=*) + host_alias=$ac_optarg ;; + + -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht) + ac_prev=htmldir ;; + -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \ + | --ht=*) + htmldir=$ac_optarg ;; + + -includedir | --includedir | --includedi | --included | --include \ + | --includ | --inclu | --incl | --inc) + ac_prev=includedir ;; + -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \ + | --includ=* | --inclu=* | --incl=* | --inc=*) + includedir=$ac_optarg ;; + + -infodir | --infodir | --infodi | --infod | --info | --inf) + ac_prev=infodir ;; + -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*) + infodir=$ac_optarg ;; + + -libdir | --libdir | --libdi | --libd) + ac_prev=libdir ;; + -libdir=* | --libdir=* | --libdi=* | --libd=*) + libdir=$ac_optarg ;; + + -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \ + | --libexe | --libex | --libe) + ac_prev=libexecdir ;; + -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \ + | --libexe=* | --libex=* | --libe=*) + libexecdir=$ac_optarg ;; + + -localedir | --localedir | --localedi | --localed | --locale) + ac_prev=localedir ;; + -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*) + localedir=$ac_optarg ;; + + -localstatedir | --localstatedir | --localstatedi | --localstated \ + | --localstate | --localstat | --localsta | --localst | --locals) + ac_prev=localstatedir ;; + -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \ + | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*) + localstatedir=$ac_optarg ;; + + 
-mandir | --mandir | --mandi | --mand | --man | --ma | --m) + ac_prev=mandir ;; + -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*) + mandir=$ac_optarg ;; + + -nfp | --nfp | --nf) + # Obsolete; use --without-fp. + with_fp=no ;; + + -no-create | --no-create | --no-creat | --no-crea | --no-cre \ + | --no-cr | --no-c | -n) + no_create=yes ;; + + -no-recursion | --no-recursion | --no-recursio | --no-recursi \ + | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r) + no_recursion=yes ;; + + -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \ + | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \ + | --oldin | --oldi | --old | --ol | --o) + ac_prev=oldincludedir ;; + -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \ + | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \ + | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*) + oldincludedir=$ac_optarg ;; + + -prefix | --prefix | --prefi | --pref | --pre | --pr | --p) + ac_prev=prefix ;; + -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*) + prefix=$ac_optarg ;; + + -program-prefix | --program-prefix | --program-prefi | --program-pref \ + | --program-pre | --program-pr | --program-p) + ac_prev=program_prefix ;; + -program-prefix=* | --program-prefix=* | --program-prefi=* \ + | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*) + program_prefix=$ac_optarg ;; + + -program-suffix | --program-suffix | --program-suffi | --program-suff \ + | --program-suf | --program-su | --program-s) + ac_prev=program_suffix ;; + -program-suffix=* | --program-suffix=* | --program-suffi=* \ + | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*) + program_suffix=$ac_optarg ;; + + -program-transform-name | --program-transform-name \ + | --program-transform-nam | --program-transform-na \ + | --program-transform-n | --program-transform- \ + | --program-transform | 
--program-transfor \ + | --program-transfo | --program-transf \ + | --program-trans | --program-tran \ + | --progr-tra | --program-tr | --program-t) + ac_prev=program_transform_name ;; + -program-transform-name=* | --program-transform-name=* \ + | --program-transform-nam=* | --program-transform-na=* \ + | --program-transform-n=* | --program-transform-=* \ + | --program-transform=* | --program-transfor=* \ + | --program-transfo=* | --program-transf=* \ + | --program-trans=* | --program-tran=* \ + | --progr-tra=* | --program-tr=* | --program-t=*) + program_transform_name=$ac_optarg ;; + + -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd) + ac_prev=pdfdir ;; + -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*) + pdfdir=$ac_optarg ;; + + -psdir | --psdir | --psdi | --psd | --ps) + ac_prev=psdir ;; + -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*) + psdir=$ac_optarg ;; + + -q | -quiet | --quiet | --quie | --qui | --qu | --q \ + | -silent | --silent | --silen | --sile | --sil) + silent=yes ;; + + -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb) + ac_prev=sbindir ;; + -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \ + | --sbi=* | --sb=*) + sbindir=$ac_optarg ;; + + -sharedstatedir | --sharedstatedir | --sharedstatedi \ + | --sharedstated | --sharedstate | --sharedstat | --sharedsta \ + | --sharedst | --shareds | --shared | --share | --shar \ + | --sha | --sh) + ac_prev=sharedstatedir ;; + -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \ + | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \ + | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \ + | --sha=* | --sh=*) + sharedstatedir=$ac_optarg ;; + + -site | --site | --sit) + ac_prev=site ;; + -site=* | --site=* | --sit=*) + site=$ac_optarg ;; + + -srcdir | --srcdir | --srcdi | --srcd | --src | --sr) + ac_prev=srcdir ;; + -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*) + srcdir=$ac_optarg ;; + + 
-sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \ + | --syscon | --sysco | --sysc | --sys | --sy) + ac_prev=sysconfdir ;; + -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \ + | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*) + sysconfdir=$ac_optarg ;; + + -target | --target | --targe | --targ | --tar | --ta | --t) + ac_prev=target_alias ;; + -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*) + target_alias=$ac_optarg ;; + + -v | -verbose | --verbose | --verbos | --verbo | --verb) + verbose=yes ;; + + -version | --version | --versio | --versi | --vers | -V) + ac_init_version=: ;; + + -with-* | --with-*) + ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && + as_fn_error $? "invalid package name: $ac_useropt" + ac_useropt_orig=$ac_useropt + ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" +"with_$ac_useropt" +"*) ;; + *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--with-$ac_useropt_orig" + ac_unrecognized_sep=', ';; + esac + eval with_$ac_useropt=\$ac_optarg ;; + + -without-* | --without-*) + ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && + as_fn_error $? "invalid package name: $ac_useropt" + ac_useropt_orig=$ac_useropt + ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" +"with_$ac_useropt" +"*) ;; + *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--without-$ac_useropt_orig" + ac_unrecognized_sep=', ';; + esac + eval with_$ac_useropt=no ;; + + --x) + # Obsolete; use --with-x. 
+ with_x=yes ;; + + -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \ + | --x-incl | --x-inc | --x-in | --x-i) + ac_prev=x_includes ;; + -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \ + | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*) + x_includes=$ac_optarg ;; + + -x-libraries | --x-libraries | --x-librarie | --x-librari \ + | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l) + ac_prev=x_libraries ;; + -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \ + | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*) + x_libraries=$ac_optarg ;; + + -*) as_fn_error $? "unrecognized option: \`$ac_option' +Try \`$0 --help' for more information" + ;; + + *=*) + ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='` + # Reject names that are not valid shell variable names. + case $ac_envvar in #( + '' | [0-9]* | *[!_$as_cr_alnum]* ) + as_fn_error $? "invalid variable name: \`$ac_envvar'" ;; + esac + eval $ac_envvar=\$ac_optarg + export $ac_envvar ;; + + *) + # FIXME: should be removed in autoconf 3.0. + $as_echo "$as_me: WARNING: you should use --build, --host, --target" >&2 + expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null && + $as_echo "$as_me: WARNING: invalid host type: $ac_option" >&2 + : "${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}" + ;; + + esac +done + +if test -n "$ac_prev"; then + ac_option=--`echo $ac_prev | sed 's/_/-/g'` + as_fn_error $? "missing argument to $ac_option" +fi + +if test -n "$ac_unrecognized_opts"; then + case $enable_option_checking in + no) ;; + fatal) as_fn_error $? "unrecognized options: $ac_unrecognized_opts" ;; + *) $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;; + esac +fi + +# Check all directory arguments for consistency. 
+for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \ + datadir sysconfdir sharedstatedir localstatedir includedir \ + oldincludedir docdir infodir htmldir dvidir pdfdir psdir \ + libdir localedir mandir +do + eval ac_val=\$$ac_var + # Remove trailing slashes. + case $ac_val in + */ ) + ac_val=`expr "X$ac_val" : 'X\(.*[^/]\)' \| "X$ac_val" : 'X\(.*\)'` + eval $ac_var=\$ac_val;; + esac + # Be sure to have absolute directory names. + case $ac_val in + [\\/$]* | ?:[\\/]* ) continue;; + NONE | '' ) case $ac_var in *prefix ) continue;; esac;; + esac + as_fn_error $? "expected an absolute directory name for --$ac_var: $ac_val" +done + +# There might be people who depend on the old broken behavior: `$host' +# used to hold the argument of --host etc. +# FIXME: To remove some day. +build=$build_alias +host=$host_alias +target=$target_alias + +# FIXME: To remove some day. +if test "x$host_alias" != x; then + if test "x$build_alias" = x; then + cross_compiling=maybe + elif test "x$build_alias" != "x$host_alias"; then + cross_compiling=yes + fi +fi + +ac_tool_prefix= +test -n "$host_alias" && ac_tool_prefix=$host_alias- + +test "$silent" = yes && exec 6>/dev/null + + +ac_pwd=`pwd` && test -n "$ac_pwd" && +ac_ls_di=`ls -di .` && +ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` || + as_fn_error $? "working directory cannot be determined" +test "X$ac_ls_di" = "X$ac_pwd_ls_di" || + as_fn_error $? "pwd does not report name of working directory" + + +# Find the source files, if location was not specified. +if test -z "$srcdir"; then + ac_srcdir_defaulted=yes + # Try the directory containing this script, then the parent directory. + ac_confdir=`$as_dirname -- "$as_myself" || +$as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_myself" : 'X\(//\)[^/]' \| \ + X"$as_myself" : 'X\(//\)$' \| \ + X"$as_myself" : 'X\(/\)' \| . 
2>/dev/null || +$as_echo X"$as_myself" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + srcdir=$ac_confdir + if test ! -r "$srcdir/$ac_unique_file"; then + srcdir=.. + fi +else + ac_srcdir_defaulted=no +fi +if test ! -r "$srcdir/$ac_unique_file"; then + test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .." + as_fn_error $? "cannot find sources ($ac_unique_file) in $srcdir" +fi +ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work" +ac_abs_confdir=`( + cd "$srcdir" && test -r "./$ac_unique_file" || as_fn_error $? "$ac_msg" + pwd)` +# When building in place, set srcdir=. +if test "$ac_abs_confdir" = "$ac_pwd"; then + srcdir=. +fi +# Remove unnecessary trailing slashes from srcdir. +# Double slashes in file names in object file debugging info +# mess up M-x gdb in Emacs. +case $srcdir in +*/) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;; +esac +for ac_var in $ac_precious_vars; do + eval ac_env_${ac_var}_set=\${${ac_var}+set} + eval ac_env_${ac_var}_value=\$${ac_var} + eval ac_cv_env_${ac_var}_set=\${${ac_var}+set} + eval ac_cv_env_${ac_var}_value=\$${ac_var} +done + +# +# Report the --help message. +# +if test "$ac_init_help" = "long"; then + # Omit some internal or obsolete options to make the list less imposing. + # This message is too long to be a string in the A/UX 3.1 sh. + cat <<_ACEOF +\`configure' configures Apache Traffic Server 5.0.1 to adapt to many kinds of systems. + +Usage: $0 [OPTION]... [VAR=VALUE]... + +To assign environment variables (e.g., CC, CFLAGS...), specify them as +VAR=VALUE. See below for descriptions of some of the useful variables. + +Defaults for the options are specified in brackets. 
+ +Configuration: + -h, --help display this help and exit + --help=short display options specific to this package + --help=recursive display the short help of all the included packages + -V, --version display version information and exit + -q, --quiet, --silent do not print \`checking ...' messages + --cache-file=FILE cache test results in FILE [disabled] + -C, --config-cache alias for \`--cache-file=config.cache' + -n, --no-create do not create output files + --srcdir=DIR find the sources in DIR [configure dir or \`..'] + +Installation directories: + --prefix=PREFIX install architecture-independent files in PREFIX + [$ac_default_prefix] + --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX + [PREFIX] + +By default, \`make install' will install all the files in +\`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify +an installation prefix other than \`$ac_default_prefix' using \`--prefix', +for instance \`--prefix=\$HOME'. + +For better control, use the options below. 
+ +Fine tuning of the installation directories: + --bindir=DIR user executables [EPREFIX/bin] + --sbindir=DIR system admin executables [EPREFIX/sbin] + --libexecdir=DIR program executables [EPREFIX/libexec] + --sysconfdir=DIR read-only single-machine data [PREFIX/etc] + --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com] + --localstatedir=DIR modifiable single-machine data [PREFIX/var] + --libdir=DIR object code libraries [EPREFIX/lib] + --includedir=DIR C header files [PREFIX/include] + --oldincludedir=DIR C header files for non-gcc [/usr/include] + --datarootdir=DIR read-only arch.-independent data root [PREFIX/share] + --datadir=DIR read-only architecture-independent data [DATAROOTDIR] + --infodir=DIR info documentation [DATAROOTDIR/info] + --localedir=DIR locale-dependent data [DATAROOTDIR/locale] + --mandir=DIR man documentation [DATAROOTDIR/man] + --docdir=DIR documentation root [DATAROOTDIR/doc/trafficserver] + --htmldir=DIR html documentation [DOCDIR] + --dvidir=DIR dvi documentation [DOCDIR] + --pdfdir=DIR pdf documentation [DOCDIR] + --psdir=DIR ps documentation [DOCDIR] +_ACEOF + + cat <<\_ACEOF + +Program names: + --program-prefix=PREFIX prepend PREFIX to installed program names + --program-suffix=SUFFIX append SUFFIX to installed program names + --program-transform-name=PROGRAM run sed PROGRAM on installed program names + +System types: + --build=BUILD configure for building on BUILD [guessed] + --host=HOST cross-compile to build programs to run on HOST [BUILD] +_ACEOF +fi + +if test -n "$ac_init_help"; then + case $ac_init_help in + short | recursive ) echo "Configuration of Apache Traffic Server 5.0.1:";; + esac + cat <<\_ACEOF + +Optional Features: + --disable-option-checking ignore unrecognized --enable/--with options + --disable-FEATURE do not include FEATURE (same as --enable-FEATURE=no) + --enable-FEATURE[=ARG] include FEATURE [ARG=yes] + --enable-silent-rules less verbose build output (undo: "make V=1") + 
--disable-silent-rules verbose build output (undo: "make V=0") + --enable-static[=PKGS] build static libraries [default=no] + --enable-layout=LAYOUT Enable LAYOUT specified inside config.layout file + (defaults to TrafficServer) + --enable-debug turn on debugging + --enable-werror turn compiler warnings into errors [default=auto] + --enable-fast-sdk enable fast SDK APIs (no input parameter sanity + checks) + --disable-diags turn off diags + --disable-tests turn off regression tests + --enable-static-libts build some static libts (dev only) + --enable-remote-cov-commit=HOST + commit cov defects to remote host [HOST=localhost] + --enable-wccp enable WCCP v2 + --enable-interim-cache enable interim cache + --disable-eventfd turn off eventfd and use pipes + --disable-posix-cap Use user id switching instead of POSIX capabilities + --disable-hwloc Don't use the hwloc library + --enable-ccache Enable ccache (for developers) + --enable-tproxy[=ARG] Use TPROXY to enable connection transparency. 'auto' + or omitted for local system default, 'no' to + disable, 'force' to use built in default, number to + use as IP_TRANSPARENT sockopt. 
[default=auto] + + --disable-freelist turn off freelist and use allocators + --enable-reclaimable-freelist + enable reclaimable freelist support (effective only + when freelist is enabled) + --enable-spdy turn on spdy protocol + --enable-experimental-plugins + build experimental plugins + --enable-example-plugins + install example plugins + --enable-test-tools install testing tools + --enable-cppapi Build the c++ api + --disable-luajit turn off LuaJIT + --enable-dependency-tracking + do not reject slow dependency extractors + --disable-dependency-tracking + speeds up one-time build + --enable-shared[=PKGS] build shared libraries [default=yes] + --enable-fast-install[=PKGS] + optimize for fast installation [default=yes] + --disable-libtool-lock avoid locking (might break parallel builds) + --enable-linux-native-aio + enable native Linux AIO support [default=no] + +Optional Packages: + --with-PACKAGE[=ARG] use PACKAGE [ARG=yes] + --without-PACKAGE do not use PACKAGE (same as --with-PACKAGE=no) + --with-user specify the system user [default=nobody] + --with-group specify the system group [default=nobody] + --with-profiler enable support for profiler [default=no] + --with-max-api-stats max number of plugin stats [default=512] + --with-max-host-name-len + max host name length [default=256] + --with-max-event-threads + max number of event threads [default=4096] + --with-max-threads-per-type + max number of threads per event type [default=3072] + --with-pic[=PKGS] try to use only PIC/non-PIC objects [default=use + both] + --with-gnu-ld assume the C compiler uses GNU ld [default=no] + --with-sysroot=DIR Search for dependent libraries within DIR + (or the compiler's sysroot if not specified). 
+ --with-architecture=ARCH + use a specific CPU architecture + --with-ncurses force the use of Ncurses or NcursesW + --without-ncursesw do not use NcursesW (wide character support) + --with-curl-prefix=PREFIX + Prefix where curl is installed (optional) + --with-curl-exec-prefix=EPREFIX + Exec prefix where curl is installed (optional) + --with-boost[=ARG] use Boost library from a standard location + (ARG=yes), from the specified location (ARG=), + or disable it (ARG=no) [ARG=yes] + --with-boost-libdir=LIB_DIR + Force given directory for boost libraries. Note that + this will override library path detection, so use + this parameter only if default library detection + fails and you know exactly where your boost + libraries are located. + --with-openssl=DIR root of the OpenSSL directory + --with-zlib=DIR use a specific zlib library + --with-lzma=DIR use a specific lzma library + --with-tcl directory containing tcl configuration + (tclConfig.sh) + --with-xml=(expat|libxml2) + select XML parser + --with-expat=DIR use a specific Expat library + --with-libxml2=DIR use a specific libxml2 library + --with-pcre=DIR use a specific pcre library + --with-jemalloc=DIR use a specific jemalloc library + --with-tcmalloc-lib specify the tcmalloc library to use + [default=tcmalloc] + --with-tcmalloc=DIR use the tcmalloc library + +Some influential environment variables: + CC C compiler command + CFLAGS C compiler flags + LDFLAGS linker flags, e.g. -L if you have libraries in a + nonstandard directory + LIBS libraries to pass to the linker, e.g. -l + CPPFLAGS (Objective) C/C++ preprocessor flags, e.g. 
-I if + you have headers in a nonstandard directory + CXX C++ compiler command + CXXFLAGS C++ compiler flags + CPP C preprocessor + CXXCPP C++ preprocessor + CCAS assembler compiler command (defaults to CC) + CCASFLAGS assembler compiler flags (defaults to CFLAGS) + PYTHON the Python interpreter + SPHINXBUILD the sphinx-build documentation generator + SPHINXOPTS additional sphinx-build options + DOXYGEN full path of Doxygen executable + PERL full path of Perl executable + CURSES_LIB linker library for Curses, e.g. -lcurses + CURL_CONFIG config script used for curl + CURL_CFLAGS CFLAGS used for curl + CURL_LIBS LIBS used for curl + PKG_CONFIG path to pkg-config utility + PKG_CONFIG_PATH + directories to add to pkg-config's search path + PKG_CONFIG_LIBDIR + path overriding pkg-config's built-in search path + HWLOC_CFLAGS + C compiler flags for HWLOC, overriding pkg-config + HWLOC_LIBS linker flags for HWLOC, overriding pkg-config + SPDYLAY_CFLAGS + C compiler flags for SPDYLAY, overriding pkg-config + SPDYLAY_LIBS + linker flags for SPDYLAY, overriding pkg-config + +Use these variables to override the choices made by `configure' or to help +it to find libraries and programs with nonstandard names/locations. + +Report bugs to . +Apache Traffic Server home page: . +_ACEOF +ac_status=$? +fi + +if test "$ac_init_help" = "recursive"; then + # If there are subdirs, report their specific --help. + for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue + test -d "$ac_dir" || + { cd "$srcdir" && ac_pwd=`pwd` && srcdir=. && test -d "$ac_dir"; } || + continue + ac_builddir=. + +case "$ac_dir" in +.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; +*) + ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` + # A ".." for each directory in $ac_dir_suffix. + ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` + case $ac_top_builddir_sub in + "") ac_top_builddir_sub=. 
ac_top_build_prefix= ;; + *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; + esac ;; +esac +ac_abs_top_builddir=$ac_pwd +ac_abs_builddir=$ac_pwd$ac_dir_suffix +# for backward compatibility: +ac_top_builddir=$ac_top_build_prefix + +case $srcdir in + .) # We are building in place. + ac_srcdir=. + ac_top_srcdir=$ac_top_builddir_sub + ac_abs_top_srcdir=$ac_pwd ;; + [\\/]* | ?:[\\/]* ) # Absolute name. + ac_srcdir=$srcdir$ac_dir_suffix; + ac_top_srcdir=$srcdir + ac_abs_top_srcdir=$srcdir ;; + *) # Relative name. + ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix + ac_top_srcdir=$ac_top_build_prefix$srcdir + ac_abs_top_srcdir=$ac_pwd/$srcdir ;; +esac +ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix + + cd "$ac_dir" || { ac_status=$?; continue; } + # Check for guested configure. + if test -f "$ac_srcdir/configure.gnu"; then + echo && + $SHELL "$ac_srcdir/configure.gnu" --help=recursive + elif test -f "$ac_srcdir/configure"; then + echo && + $SHELL "$ac_srcdir/configure" --help=recursive + else + $as_echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2 + fi || ac_status=$? + cd "$ac_pwd" || { ac_status=$?; break; } + done +fi + +test -n "$ac_init_help" && exit $ac_status +if $ac_init_version; then + cat <<\_ACEOF +Apache Traffic Server configure 5.0.1 +generated by GNU Autoconf 2.69 + +Copyright (C) 2012 Free Software Foundation, Inc. +This configure script is free software; the Free Software Foundation +gives unlimited permission to copy, distribute and modify it. +_ACEOF + exit +fi + +## ------------------------ ## +## Autoconf initialization. ## +## ------------------------ ## + +# ac_fn_c_try_compile LINENO +# -------------------------- +# Try to compile conftest.$ac_ext, and return whether this succeeded. 
+ac_fn_c_try_compile () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + rm -f conftest.$ac_objext + if { { ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compile") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + grep -v '^ *+' conftest.err >conftest.er1 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then : + ac_retval=0 +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 +fi + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_c_try_compile + +# ac_fn_cxx_try_compile LINENO +# ---------------------------- +# Try to compile conftest.$ac_ext, and return whether this succeeded. +ac_fn_cxx_try_compile () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + rm -f conftest.$ac_objext + if { { ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compile") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + grep -v '^ *+' conftest.err >conftest.er1 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && { + test -z "$ac_cxx_werror_flag" || + test ! 
-s conftest.err + } && test -s conftest.$ac_objext; then : + ac_retval=0 +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 +fi + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_cxx_try_compile + +# ac_fn_c_try_cpp LINENO +# ---------------------- +# Try to preprocess conftest.$ac_ext, and return whether this succeeded. +ac_fn_c_try_cpp () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + if { { ac_try="$ac_cpp conftest.$ac_ext" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + grep -v '^ *+' conftest.err >conftest.er1 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } > conftest.i && { + test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || + test ! -s conftest.err + }; then : + ac_retval=0 +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 +fi + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_c_try_cpp + +# ac_fn_cxx_try_cpp LINENO +# ------------------------ +# Try to preprocess conftest.$ac_ext, and return whether this succeeded. +ac_fn_cxx_try_cpp () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + if { { ac_try="$ac_cpp conftest.$ac_ext" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err + ac_status=$? 
+ if test -s conftest.err; then + grep -v '^ *+' conftest.err >conftest.er1 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } > conftest.i && { + test -z "$ac_cxx_preproc_warn_flag$ac_cxx_werror_flag" || + test ! -s conftest.err + }; then : + ac_retval=0 +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 +fi + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_cxx_try_cpp + +# ac_fn_cxx_check_header_mongrel LINENO HEADER VAR INCLUDES +# --------------------------------------------------------- +# Tests whether HEADER exists, giving a warning if it cannot be compiled using +# the include files in INCLUDES and setting the cache variable VAR +# accordingly. +ac_fn_cxx_check_header_mongrel () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + if eval \${$3+:} false; then : + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 +$as_echo_n "checking for $2... " >&6; } +if eval \${$3+:} false; then : + $as_echo_n "(cached) " >&6 +fi +eval ac_res=\$$3 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } +else + # Is the header compilable? +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 usability" >&5 +$as_echo_n "checking $2 usability... " >&6; } +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$4 +#include <$2> +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + ac_header_compiler=yes +else + ac_header_compiler=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_compiler" >&5 +$as_echo "$ac_header_compiler" >&6; } + +# Is the header present? +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 presence" >&5 +$as_echo_n "checking $2 presence... 
" >&6; } +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include <$2> +_ACEOF +if ac_fn_cxx_try_cpp "$LINENO"; then : + ac_header_preproc=yes +else + ac_header_preproc=no +fi +rm -f conftest.err conftest.i conftest.$ac_ext +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_preproc" >&5 +$as_echo "$ac_header_preproc" >&6; } + +# So? What about this header? +case $ac_header_compiler:$ac_header_preproc:$ac_cxx_preproc_warn_flag in #(( + yes:no: ) + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&5 +$as_echo "$as_me: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 +$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} + ;; + no:yes:* ) + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: present but cannot be compiled" >&5 +$as_echo "$as_me: WARNING: $2: present but cannot be compiled" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: check for missing prerequisite headers?" >&5 +$as_echo "$as_me: WARNING: $2: check for missing prerequisite headers?" 
>&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: see the Autoconf documentation" >&5 +$as_echo "$as_me: WARNING: $2: see the Autoconf documentation" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&5 +$as_echo "$as_me: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 +$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} +( $as_echo "## ------------------------------------------- ## +## Report this to dev@trafficserver.apache.org ## +## ------------------------------------------- ##" + ) | sed "s/^/$as_me: WARNING: /" >&2 + ;; +esac + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 +$as_echo_n "checking for $2... " >&6; } +if eval \${$3+:} false; then : + $as_echo_n "(cached) " >&6 +else + eval "$3=\$ac_header_compiler" +fi +eval ac_res=\$$3 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } +fi + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + +} # ac_fn_cxx_check_header_mongrel + +# ac_fn_cxx_try_run LINENO +# ------------------------ +# Try to link conftest.$ac_ext, and return whether this succeeded. Assumes +# that executables *can* be run. +ac_fn_cxx_try_run () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + if { { ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 + test $ac_status = 0; } && { ac_try='./conftest$ac_exeext' + { { case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_try") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; }; then : + ac_retval=0 +else + $as_echo "$as_me: program exited with status $ac_status" >&5 + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=$ac_status +fi + rm -rf conftest.dSYM conftest_ipa8_conftest.oo + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_cxx_try_run + +# ac_fn_cxx_check_header_compile LINENO HEADER VAR INCLUDES +# --------------------------------------------------------- +# Tests whether HEADER exists and can be compiled using the include files in +# INCLUDES, setting the cache variable VAR accordingly. +ac_fn_cxx_check_header_compile () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 +$as_echo_n "checking for $2... " >&6; } +if eval \${$3+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$4 +#include <$2> +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + eval "$3=yes" +else + eval "$3=no" +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +eval ac_res=\$$3 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + +} # ac_fn_cxx_check_header_compile + +# ac_fn_c_try_link LINENO +# ----------------------- +# Try to link conftest.$ac_ext, and return whether this succeeded. 
+ac_fn_c_try_link () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + rm -f conftest.$ac_objext conftest$ac_exeext + if { { ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + grep -v '^ *+' conftest.err >conftest.er1 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && { + test "$cross_compiling" = yes || + test -x conftest$ac_exeext + }; then : + ac_retval=0 +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 +fi + # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information + # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would + # interfere with the next link command; also delete a directory that is + # left behind by Apple's compiler. We do this before executing the actions. + rm -rf conftest.dSYM conftest_ipa8_conftest.oo + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_c_try_link + +# ac_fn_c_check_header_compile LINENO HEADER VAR INCLUDES +# ------------------------------------------------------- +# Tests whether HEADER exists and can be compiled using the include files in +# INCLUDES, setting the cache variable VAR accordingly. +ac_fn_c_check_header_compile () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 +$as_echo_n "checking for $2... 
" >&6; } +if eval \${$3+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$4 +#include <$2> +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + eval "$3=yes" +else + eval "$3=no" +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +eval ac_res=\$$3 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + +} # ac_fn_c_check_header_compile + +# ac_fn_c_check_func LINENO FUNC VAR +# ---------------------------------- +# Tests whether FUNC exists, setting the cache variable VAR accordingly +ac_fn_c_check_func () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 +$as_echo_n "checking for $2... " >&6; } +if eval \${$3+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +/* Define $2 to an innocuous variant, in case declares $2. + For example, HP-UX 11i declares gettimeofday. */ +#define $2 innocuous_$2 + +/* System header to define __stub macros and hopefully few prototypes, + which can conflict with char $2 (); below. + Prefer to if __STDC__ is defined, since + exists even on freestanding compilers. */ + +#ifdef __STDC__ +# include +#else +# include +#endif + +#undef $2 + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char $2 (); +/* The GNU C library defines this for functions which it implements + to always fail with ENOSYS. Some functions are actually named + something starting with __ and the normal name is an alias. 
*/ +#if defined __stub_$2 || defined __stub___$2 +choke me +#endif + +int +main () +{ +return $2 (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + eval "$3=yes" +else + eval "$3=no" +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +eval ac_res=\$$3 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + +} # ac_fn_c_check_func + +# ac_fn_cxx_try_link LINENO +# ------------------------- +# Try to link conftest.$ac_ext, and return whether this succeeded. +ac_fn_cxx_try_link () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + rm -f conftest.$ac_objext conftest$ac_exeext + if { { ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + grep -v '^ *+' conftest.err >conftest.er1 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && { + test -z "$ac_cxx_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && { + test "$cross_compiling" = yes || + test -x conftest$ac_exeext + }; then : + ac_retval=0 +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 +fi + # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information + # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would + # interfere with the next link command; also delete a directory that is + # left behind by Apple's compiler. We do this before executing the actions. 
+ rm -rf conftest.dSYM conftest_ipa8_conftest.oo + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_cxx_try_link + +# ac_fn_c_try_run LINENO +# ---------------------- +# Try to link conftest.$ac_ext, and return whether this succeeded. Assumes +# that executables *can* be run. +ac_fn_c_try_run () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + if { { ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && { ac_try='./conftest$ac_exeext' + { { case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_try") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; }; then : + ac_retval=0 +else + $as_echo "$as_me: program exited with status $ac_status" >&5 + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=$ac_status +fi + rm -rf conftest.dSYM conftest_ipa8_conftest.oo + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_c_try_run + +# ac_fn_c_compute_int LINENO EXPR VAR INCLUDES +# -------------------------------------------- +# Tries to find the compile-time value of EXPR in a program that includes +# INCLUDES, setting VAR accordingly. Returns whether the value could be +# computed +ac_fn_c_compute_int () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + if test "$cross_compiling" = yes; then + # Depending upon the size, compute the lo and hi bounds. 
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$4 +int +main () +{ +static int test_array [1 - 2 * !(($2) >= 0)]; +test_array [0] = 0; +return test_array [0]; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_lo=0 ac_mid=0 + while :; do + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$4 +int +main () +{ +static int test_array [1 - 2 * !(($2) <= $ac_mid)]; +test_array [0] = 0; +return test_array [0]; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_hi=$ac_mid; break +else + as_fn_arith $ac_mid + 1 && ac_lo=$as_val + if test $ac_lo -le $ac_mid; then + ac_lo= ac_hi= + break + fi + as_fn_arith 2 '*' $ac_mid + 1 && ac_mid=$as_val +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + done +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$4 +int +main () +{ +static int test_array [1 - 2 * !(($2) < 0)]; +test_array [0] = 0; +return test_array [0]; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_hi=-1 ac_mid=-1 + while :; do + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$4 +int +main () +{ +static int test_array [1 - 2 * !(($2) >= $ac_mid)]; +test_array [0] = 0; +return test_array [0]; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_lo=$ac_mid; break +else + as_fn_arith '(' $ac_mid ')' - 1 && ac_hi=$as_val + if test $ac_mid -le $ac_hi; then + ac_lo= ac_hi= + break + fi + as_fn_arith 2 '*' $ac_mid && ac_mid=$as_val +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + done +else + ac_lo= ac_hi= +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +# Binary search between lo and hi bounds. +while test "x$ac_lo" != "x$ac_hi"; do + as_fn_arith '(' $ac_hi - $ac_lo ')' / 2 + $ac_lo && ac_mid=$as_val + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +$4 +int +main () +{ +static int test_array [1 - 2 * !(($2) <= $ac_mid)]; +test_array [0] = 0; +return test_array [0]; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_hi=$ac_mid +else + as_fn_arith '(' $ac_mid ')' + 1 && ac_lo=$as_val +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +done +case $ac_lo in #(( +?*) eval "$3=\$ac_lo"; ac_retval=0 ;; +'') ac_retval=1 ;; +esac + else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$4 +static long int longval () { return $2; } +static unsigned long int ulongval () { return $2; } +#include +#include +int +main () +{ + + FILE *f = fopen ("conftest.val", "w"); + if (! f) + return 1; + if (($2) < 0) + { + long int i = longval (); + if (i != ($2)) + return 1; + fprintf (f, "%ld", i); + } + else + { + unsigned long int i = ulongval (); + if (i != ($2)) + return 1; + fprintf (f, "%lu", i); + } + /* Do not output a trailing newline, as this causes \r\n confusion + on some platforms. */ + return ferror (f) || fclose (f) != 0; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_run "$LINENO"; then : + echo >>conftest.val; read $3 &5 +$as_echo_n "checking for $2... " >&6; } +if eval \${$3+:} false; then : + $as_echo_n "(cached) " >&6 +fi +eval ac_res=\$$3 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } +else + # Is the header compilable? +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 usability" >&5 +$as_echo_n "checking $2 usability... " >&6; } +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$4 +#include <$2> +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_header_compiler=yes +else + ac_header_compiler=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_compiler" >&5 +$as_echo "$ac_header_compiler" >&6; } + +# Is the header present? 
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 presence" >&5 +$as_echo_n "checking $2 presence... " >&6; } +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include <$2> +_ACEOF +if ac_fn_c_try_cpp "$LINENO"; then : + ac_header_preproc=yes +else + ac_header_preproc=no +fi +rm -f conftest.err conftest.i conftest.$ac_ext +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_preproc" >&5 +$as_echo "$ac_header_preproc" >&6; } + +# So? What about this header? +case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in #(( + yes:no: ) + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&5 +$as_echo "$as_me: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 +$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} + ;; + no:yes:* ) + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: present but cannot be compiled" >&5 +$as_echo "$as_me: WARNING: $2: present but cannot be compiled" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: check for missing prerequisite headers?" >&5 +$as_echo "$as_me: WARNING: $2: check for missing prerequisite headers?" 
>&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: see the Autoconf documentation" >&5 +$as_echo "$as_me: WARNING: $2: see the Autoconf documentation" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&5 +$as_echo "$as_me: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 +$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} +( $as_echo "## ------------------------------------------- ## +## Report this to dev@trafficserver.apache.org ## +## ------------------------------------------- ##" + ) | sed "s/^/$as_me: WARNING: /" >&2 + ;; +esac + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 +$as_echo_n "checking for $2... " >&6; } +if eval \${$3+:} false; then : + $as_echo_n "(cached) " >&6 +else + eval "$3=\$ac_header_compiler" +fi +eval ac_res=\$$3 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } +fi + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + +} # ac_fn_c_check_header_mongrel + +# ac_fn_cxx_check_decl LINENO SYMBOL VAR INCLUDES +# ----------------------------------------------- +# Tests whether SYMBOL is declared in INCLUDES, setting cache variable VAR +# accordingly. +ac_fn_cxx_check_decl () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + as_decl_name=`echo $2|sed 's/ *(.*//'` + as_decl_use=`echo $2|sed -e 's/(/((/' -e 's/)/) 0&/' -e 's/,/) 0& (/g'` + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $as_decl_name is declared" >&5 +$as_echo_n "checking whether $as_decl_name is declared... " >&6; } +if eval \${$3+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +$4 +int +main () +{ +#ifndef $as_decl_name +#ifdef __cplusplus + (void) $as_decl_use; +#else + (void) $as_decl_name; +#endif +#endif + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + eval "$3=yes" +else + eval "$3=no" +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +eval ac_res=\$$3 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + +} # ac_fn_cxx_check_decl + +# ac_fn_c_check_member LINENO AGGR MEMBER VAR INCLUDES +# ---------------------------------------------------- +# Tries to find if the field MEMBER exists in type AGGR, after including +# INCLUDES, setting cache variable VAR accordingly. +ac_fn_c_check_member () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2.$3" >&5 +$as_echo_n "checking for $2.$3... " >&6; } +if eval \${$4+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$5 +int +main () +{ +static $2 ac_aggr; +if (ac_aggr.$3) +return 0; + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + eval "$4=yes" +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +$5 +int +main () +{ +static $2 ac_aggr; +if (sizeof ac_aggr.$3) +return 0; + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + eval "$4=yes" +else + eval "$4=no" +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +eval ac_res=\$$4 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + +} # ac_fn_c_check_member + +# ac_fn_c_check_type LINENO TYPE VAR INCLUDES +# ------------------------------------------- +# Tests whether TYPE exists after having included INCLUDES, setting cache +# variable VAR accordingly. +ac_fn_c_check_type () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 +$as_echo_n "checking for $2... " >&6; } +if eval \${$3+:} false; then : + $as_echo_n "(cached) " >&6 +else + eval "$3=no" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$4 +int +main () +{ +if (sizeof ($2)) + return 0; + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$4 +int +main () +{ +if (sizeof (($2))) + return 0; + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + +else + eval "$3=yes" +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +eval ac_res=\$$3 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + +} # ac_fn_c_check_type +cat >config.log <<_ACEOF +This file contains any messages produced by compilers while +running configure, to aid debugging if configure makes a mistake. 
+ +It was created by Apache Traffic Server $as_me 5.0.1, which was +generated by GNU Autoconf 2.69. Invocation command line was + + $ $0 $@ + +_ACEOF +exec 5>>config.log +{ +cat <<_ASUNAME +## --------- ## +## Platform. ## +## --------- ## + +hostname = `(hostname || uname -n) 2>/dev/null | sed 1q` +uname -m = `(uname -m) 2>/dev/null || echo unknown` +uname -r = `(uname -r) 2>/dev/null || echo unknown` +uname -s = `(uname -s) 2>/dev/null || echo unknown` +uname -v = `(uname -v) 2>/dev/null || echo unknown` + +/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown` +/bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown` + +/bin/arch = `(/bin/arch) 2>/dev/null || echo unknown` +/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown` +/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown` +/usr/bin/hostinfo = `(/usr/bin/hostinfo) 2>/dev/null || echo unknown` +/bin/machine = `(/bin/machine) 2>/dev/null || echo unknown` +/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown` +/bin/universe = `(/bin/universe) 2>/dev/null || echo unknown` + +_ASUNAME + +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + $as_echo "PATH: $as_dir" + done +IFS=$as_save_IFS + +} >&5 + +cat >&5 <<_ACEOF + + +## ----------- ## +## Core tests. ## +## ----------- ## + +_ACEOF + + +# Keep a trace of the command line. +# Strip out --no-create and --no-recursion so they do not pile up. +# Strip out --silent because we don't want to record it for future runs. +# Also quote any args containing shell meta-characters. +# Make two passes to allow for proper duplicate-argument suppression. 
+ac_configure_args= +ac_configure_args0= +ac_configure_args1= +ac_must_keep_next=false +for ac_pass in 1 2 +do + for ac_arg + do + case $ac_arg in + -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;; + -q | -quiet | --quiet | --quie | --qui | --qu | --q \ + | -silent | --silent | --silen | --sile | --sil) + continue ;; + *\'*) + ac_arg=`$as_echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; + esac + case $ac_pass in + 1) as_fn_append ac_configure_args0 " '$ac_arg'" ;; + 2) + as_fn_append ac_configure_args1 " '$ac_arg'" + if test $ac_must_keep_next = true; then + ac_must_keep_next=false # Got value, back to normal. + else + case $ac_arg in + *=* | --config-cache | -C | -disable-* | --disable-* \ + | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \ + | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \ + | -with-* | --with-* | -without-* | --without-* | --x) + case "$ac_configure_args0 " in + "$ac_configure_args1"*" '$ac_arg' "* ) continue ;; + esac + ;; + -* ) ac_must_keep_next=true ;; + esac + fi + as_fn_append ac_configure_args " '$ac_arg'" + ;; + esac + done +done +{ ac_configure_args0=; unset ac_configure_args0;} +{ ac_configure_args1=; unset ac_configure_args1;} + +# When interrupted or exit'd, cleanup temporary files, and complete +# config.log. We remove comments because anyway the quotes in there +# would cause problems or look ugly. +# WARNING: Use '\'' to represent an apostrophe within the trap. +# WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug. +trap 'exit_status=$? + # Save into config.log some information that might help in debugging. + { + echo + + $as_echo "## ---------------- ## +## Cache variables. 
## +## ---------------- ##" + echo + # The following way of writing the cache mishandles newlines in values, +( + for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do + eval ac_val=\$$ac_var + case $ac_val in #( + *${as_nl}*) + case $ac_var in #( + *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 +$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; + esac + case $ac_var in #( + _ | IFS | as_nl) ;; #( + BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( + *) { eval $ac_var=; unset $ac_var;} ;; + esac ;; + esac + done + (set) 2>&1 | + case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #( + *${as_nl}ac_space=\ *) + sed -n \ + "s/'\''/'\''\\\\'\'''\''/g; + s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p" + ;; #( + *) + sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" + ;; + esac | + sort +) + echo + + $as_echo "## ----------------- ## +## Output variables. ## +## ----------------- ##" + echo + for ac_var in $ac_subst_vars + do + eval ac_val=\$$ac_var + case $ac_val in + *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; + esac + $as_echo "$ac_var='\''$ac_val'\''" + done | sort + echo + + if test -n "$ac_subst_files"; then + $as_echo "## ------------------- ## +## File substitutions. ## +## ------------------- ##" + echo + for ac_var in $ac_subst_files + do + eval ac_val=\$$ac_var + case $ac_val in + *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; + esac + $as_echo "$ac_var='\''$ac_val'\''" + done | sort + echo + fi + + if test -s confdefs.h; then + $as_echo "## ----------- ## +## confdefs.h. 
## +## ----------- ##" + echo + cat confdefs.h + echo + fi + test "$ac_signal" != 0 && + $as_echo "$as_me: caught signal $ac_signal" + $as_echo "$as_me: exit $exit_status" + } >&5 + rm -f core *.core core.conftest.* && + rm -f -r conftest* confdefs* conf$$* $ac_clean_files && + exit $exit_status +' 0 +for ac_signal in 1 2 13 15; do + trap 'ac_signal='$ac_signal'; as_fn_exit 1' $ac_signal +done +ac_signal=0 + +# confdefs.h avoids OS command line length limits that DEFS can exceed. +rm -f -r conftest* confdefs.h + +$as_echo "/* confdefs.h */" > confdefs.h + +# Predefined preprocessor variables. + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_NAME "$PACKAGE_NAME" +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_TARNAME "$PACKAGE_TARNAME" +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_VERSION "$PACKAGE_VERSION" +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_STRING "$PACKAGE_STRING" +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT" +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_URL "$PACKAGE_URL" +_ACEOF + + +# Let the site file select an alternate cache file if it wants to. +# Prefer an explicitly selected file to automatically selected ones. +ac_site_file1=NONE +ac_site_file2=NONE +if test -n "$CONFIG_SITE"; then + # We do not want a PATH search for config.site. 
+ case $CONFIG_SITE in #(( + -*) ac_site_file1=./$CONFIG_SITE;; + */*) ac_site_file1=$CONFIG_SITE;; + *) ac_site_file1=./$CONFIG_SITE;; + esac +elif test "x$prefix" != xNONE; then + ac_site_file1=$prefix/share/config.site + ac_site_file2=$prefix/etc/config.site +else + ac_site_file1=$ac_default_prefix/share/config.site + ac_site_file2=$ac_default_prefix/etc/config.site +fi +for ac_site_file in "$ac_site_file1" "$ac_site_file2" +do + test "x$ac_site_file" = xNONE && continue + if test /dev/null != "$ac_site_file" && test -r "$ac_site_file"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5 +$as_echo "$as_me: loading site script $ac_site_file" >&6;} + sed 's/^/| /' "$ac_site_file" >&5 + . "$ac_site_file" \ + || { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "failed to load site script $ac_site_file +See \`config.log' for more details" "$LINENO" 5; } + fi +done + +if test -r "$cache_file"; then + # Some versions of bash will fail to source /dev/null (special files + # actually), so we avoid doing that. DJGPP emulates it as a regular file. + if test /dev/null != "$cache_file" && test -f "$cache_file"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: loading cache $cache_file" >&5 +$as_echo "$as_me: loading cache $cache_file" >&6;} + case $cache_file in + [\\/]* | ?:[\\/]* ) . "$cache_file";; + *) . "./$cache_file";; + esac + fi +else + { $as_echo "$as_me:${as_lineno-$LINENO}: creating cache $cache_file" >&5 +$as_echo "$as_me: creating cache $cache_file" >&6;} + >$cache_file +fi + +# Check that the precious variables saved in the cache have kept the same +# value. 
+ac_cache_corrupted=false +for ac_var in $ac_precious_vars; do + eval ac_old_set=\$ac_cv_env_${ac_var}_set + eval ac_new_set=\$ac_env_${ac_var}_set + eval ac_old_val=\$ac_cv_env_${ac_var}_value + eval ac_new_val=\$ac_env_${ac_var}_value + case $ac_old_set,$ac_new_set in + set,) + { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5 +$as_echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;} + ac_cache_corrupted=: ;; + ,set) + { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5 +$as_echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;} + ac_cache_corrupted=: ;; + ,);; + *) + if test "x$ac_old_val" != "x$ac_new_val"; then + # differences in whitespace do not lead to failure. + ac_old_val_w=`echo x $ac_old_val` + ac_new_val_w=`echo x $ac_new_val` + if test "$ac_old_val_w" != "$ac_new_val_w"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5 +$as_echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;} + ac_cache_corrupted=: + else + { $as_echo "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5 +$as_echo "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;} + eval $ac_var=\$ac_old_val + fi + { $as_echo "$as_me:${as_lineno-$LINENO}: former value: \`$ac_old_val'" >&5 +$as_echo "$as_me: former value: \`$ac_old_val'" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: current value: \`$ac_new_val'" >&5 +$as_echo "$as_me: current value: \`$ac_new_val'" >&2;} + fi;; + esac + # Pass precious variables to config.status. + if test "$ac_new_set" = set; then + case $ac_new_val in + *\'*) ac_arg=$ac_var=`$as_echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;; + *) ac_arg=$ac_var=$ac_new_val ;; + esac + case " $ac_configure_args " in + *" '$ac_arg' "*) ;; # Avoid dups. 
Use of quotes ensures accuracy. + *) as_fn_append ac_configure_args " '$ac_arg'" ;; + esac + fi +done +if $ac_cache_corrupted; then + { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5 +$as_echo "$as_me: error: changes in the environment can compromise the build" >&2;} + as_fn_error $? "run \`make distclean' and/or \`rm $cache_file' and start over" "$LINENO" 5 +fi +## -------------------- ## +## Main body of script. ## +## -------------------- ## + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + + +ac_aux_dir= +for ac_dir in build/aux "$srcdir"/build/aux; do + if test -f "$ac_dir/install-sh"; then + ac_aux_dir=$ac_dir + ac_install_sh="$ac_aux_dir/install-sh -c" + break + elif test -f "$ac_dir/install.sh"; then + ac_aux_dir=$ac_dir + ac_install_sh="$ac_aux_dir/install.sh -c" + break + elif test -f "$ac_dir/shtool"; then + ac_aux_dir=$ac_dir + ac_install_sh="$ac_aux_dir/shtool install -c" + break + fi +done +if test -z "$ac_aux_dir"; then + as_fn_error $? "cannot find install-sh, install.sh, or shtool in build/aux \"$srcdir\"/build/aux" "$LINENO" 5 +fi + +# These three variables are undocumented and unsupported, +# and are intended to be withdrawn in a future Autoconf release. +# They can cause serious problems if a builder's source tree is in a directory +# whose full name contains unusual characters. +ac_config_guess="$SHELL $ac_aux_dir/config.guess" # Please don't use this var. +ac_config_sub="$SHELL $ac_aux_dir/config.sub" # Please don't use this var. +ac_configure="$SHELL $ac_aux_dir/configure" # Please don't use this var. + + + + +am__api_version='1.13' + +# Find a good install program. 
We prefer a C program (faster), +# so one script is as good as another. But avoid the broken or +# incompatible versions: +# SysV /etc/install, /usr/sbin/install +# SunOS /usr/etc/install +# IRIX /sbin/install +# AIX /bin/install +# AmigaOS /C/install, which installs bootblocks on floppy discs +# AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag +# AFS /usr/afsws/bin/install, which mishandles nonexistent args +# SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff" +# OS/2's system install, which has a completely different semantic +# ./install, which can be erroneously created by make from ./install.sh. +# Reject install programs that cannot install multiple files. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a BSD-compatible install" >&5 +$as_echo_n "checking for a BSD-compatible install... " >&6; } +if test -z "$INSTALL"; then +if ${ac_cv_path_install+:} false; then : + $as_echo_n "(cached) " >&6 +else + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + # Account for people who put trailing slashes in PATH elements. +case $as_dir/ in #(( + ./ | .// | /[cC]/* | \ + /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \ + ?:[\\/]os2[\\/]install[\\/]* | ?:[\\/]OS2[\\/]INSTALL[\\/]* | \ + /usr/ucb/* ) ;; + *) + # OSF1 and SCO ODT 3.0 have their own names for install. + # Don't use installbsd from OSF since it installs stuff as root + # by default. + for ac_prog in ginstall scoinst install; do + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_prog$ac_exec_ext"; then + if test $ac_prog = install && + grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then + # AIX install. It has an incompatible calling convention. + : + elif test $ac_prog = install && + grep pwplus "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then + # program-specific install script used by HP pwplus--don't use. 
+ : + else + rm -rf conftest.one conftest.two conftest.dir + echo one > conftest.one + echo two > conftest.two + mkdir conftest.dir + if "$as_dir/$ac_prog$ac_exec_ext" -c conftest.one conftest.two "`pwd`/conftest.dir" && + test -s conftest.one && test -s conftest.two && + test -s conftest.dir/conftest.one && + test -s conftest.dir/conftest.two + then + ac_cv_path_install="$as_dir/$ac_prog$ac_exec_ext -c" + break 3 + fi + fi + fi + done + done + ;; +esac + + done +IFS=$as_save_IFS + +rm -rf conftest.one conftest.two conftest.dir + +fi + if test "${ac_cv_path_install+set}" = set; then + INSTALL=$ac_cv_path_install + else + # As a last resort, use the slow shell script. Don't cache a + # value for INSTALL within a source directory, because that will + # break other packages using the cache if that directory is + # removed, or if the value is a relative name. + INSTALL=$ac_install_sh + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $INSTALL" >&5 +$as_echo "$INSTALL" >&6; } + +# Use test -z because SunOS4 sh mishandles braces in ${var-val}. +# It thinks the first close brace ends the variable substitution. +test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}' + +test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}' + +test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644' + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether build environment is sane" >&5 +$as_echo_n "checking whether build environment is sane... " >&6; } +# Reject unsafe characters in $srcdir or the absolute working directory +# name. Accept space and tab only in the latter. +am_lf=' +' +case `pwd` in + *[\\\"\#\$\&\'\`$am_lf]*) + as_fn_error $? "unsafe absolute working directory name" "$LINENO" 5;; +esac +case $srcdir in + *[\\\"\#\$\&\'\`$am_lf\ \ ]*) + as_fn_error $? "unsafe srcdir value: '$srcdir'" "$LINENO" 5;; +esac + +# Do 'set' in a subshell so we don't clobber the current shell's +# arguments. 
Must try -L first in case configure is actually a +# symlink; some systems play weird games with the mod time of symlinks +# (eg FreeBSD returns the mod time of the symlink's containing +# directory). +if ( + am_has_slept=no + for am_try in 1 2; do + echo "timestamp, slept: $am_has_slept" > conftest.file + set X `ls -Lt "$srcdir/configure" conftest.file 2> /dev/null` + if test "$*" = "X"; then + # -L didn't work. + set X `ls -t "$srcdir/configure" conftest.file` + fi + if test "$*" != "X $srcdir/configure conftest.file" \ + && test "$*" != "X conftest.file $srcdir/configure"; then + + # If neither matched, then we have a broken ls. This can happen + # if, for instance, CONFIG_SHELL is bash and it inherits a + # broken ls alias from the environment. This has actually + # happened. Such a system could not be considered "sane". + as_fn_error $? "ls -t appears to fail. Make sure there is not a broken + alias in your environment" "$LINENO" 5 + fi + if test "$2" = conftest.file || test $am_try -eq 2; then + break + fi + # Just in case. + sleep 1 + am_has_slept=yes + done + test "$2" = conftest.file + ) +then + # Ok. + : +else + as_fn_error $? "newly created file is older than distributed files! +Check your system clock" "$LINENO" 5 +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +# If we didn't sleep, we still need to ensure time stamps of config.status and +# generated files are strictly newer. +am_sleep_pid= +if grep 'slept: no' conftest.file >/dev/null 2>&1; then + ( sleep 1 ) & + am_sleep_pid=$! +fi + +rm -f conftest.file + +test "$program_prefix" != NONE && + program_transform_name="s&^&$program_prefix&;$program_transform_name" +# Use a double $ so make ignores it. +test "$program_suffix" != NONE && + program_transform_name="s&\$&$program_suffix&;$program_transform_name" +# Double any \ or $. +# By default was `s,x,x', remove it if useless. 
+ac_script='s/[\\$]/&&/g;s/;s,x,x,$//' +program_transform_name=`$as_echo "$program_transform_name" | sed "$ac_script"` + +# expand $ac_aux_dir to an absolute path +am_aux_dir=`cd $ac_aux_dir && pwd` + +if test x"${MISSING+set}" != xset; then + case $am_aux_dir in + *\ * | *\ *) + MISSING="\${SHELL} \"$am_aux_dir/missing\"" ;; + *) + MISSING="\${SHELL} $am_aux_dir/missing" ;; + esac +fi +# Use eval to expand $SHELL +if eval "$MISSING --is-lightweight"; then + am_missing_run="$MISSING " +else + am_missing_run= + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: 'missing' script is too old or missing" >&5 +$as_echo "$as_me: WARNING: 'missing' script is too old or missing" >&2;} +fi + +if test x"${install_sh}" != xset; then + case $am_aux_dir in + *\ * | *\ *) + install_sh="\${SHELL} '$am_aux_dir/install-sh'" ;; + *) + install_sh="\${SHELL} $am_aux_dir/install-sh" + esac +fi + +# Installed binaries are usually stripped using 'strip' when the user +# run "make install-strip". However 'strip' might not be the right +# tool to use in cross-compilation environments, therefore Automake +# will honor the 'STRIP' environment variable to overrule this program. +if test "$cross_compiling" != no; then + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args. +set dummy ${ac_tool_prefix}strip; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_STRIP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$STRIP"; then + ac_cv_prog_STRIP="$STRIP" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_STRIP="${ac_tool_prefix}strip" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +STRIP=$ac_cv_prog_STRIP +if test -n "$STRIP"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $STRIP" >&5 +$as_echo "$STRIP" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_STRIP"; then + ac_ct_STRIP=$STRIP + # Extract the first word of "strip", so it can be a program name with args. +set dummy strip; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_STRIP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_STRIP"; then + ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_STRIP="strip" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP +if test -n "$ac_ct_STRIP"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_STRIP" >&5 +$as_echo "$ac_ct_STRIP" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_STRIP" = x; then + STRIP=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + STRIP=$ac_ct_STRIP + fi +else + STRIP="$ac_cv_prog_STRIP" +fi + +fi +INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s" + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a thread-safe mkdir -p" >&5 +$as_echo_n "checking for a thread-safe mkdir -p... " >&6; } +if test -z "$MKDIR_P"; then + if ${ac_cv_path_mkdir+:} false; then : + $as_echo_n "(cached) " >&6 +else + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH$PATH_SEPARATOR/opt/sfw/bin +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_prog in mkdir gmkdir; do + for ac_exec_ext in '' $ac_executable_extensions; do + as_fn_executable_p "$as_dir/$ac_prog$ac_exec_ext" || continue + case `"$as_dir/$ac_prog$ac_exec_ext" --version 2>&1` in #( + 'mkdir (GNU coreutils) '* | \ + 'mkdir (coreutils) '* | \ + 'mkdir (fileutils) '4.1*) + ac_cv_path_mkdir=$as_dir/$ac_prog$ac_exec_ext + break 3;; + esac + done + done + done +IFS=$as_save_IFS + +fi + + test -d ./--version && rmdir ./--version + if test "${ac_cv_path_mkdir+set}" = set; then + MKDIR_P="$ac_cv_path_mkdir -p" + else + # As a last resort, use the slow shell script. 
Don't cache a + # value for MKDIR_P within a source directory, because that will + # break other packages using the cache if that directory is + # removed, or if the value is a relative name. + MKDIR_P="$ac_install_sh -d" + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $MKDIR_P" >&5 +$as_echo "$MKDIR_P" >&6; } + +for ac_prog in gawk mawk nawk awk +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_AWK+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$AWK"; then + ac_cv_prog_AWK="$AWK" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_AWK="$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +AWK=$ac_cv_prog_AWK +if test -n "$AWK"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AWK" >&5 +$as_echo "$AWK" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$AWK" && break +done + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${MAKE-make} sets \$(MAKE)" >&5 +$as_echo_n "checking whether ${MAKE-make} sets \$(MAKE)... " >&6; } +set x ${MAKE-make} +ac_make=`$as_echo "$2" | sed 's/+/p/g; s/[^a-zA-Z0-9_]/_/g'` +if eval \${ac_cv_prog_make_${ac_make}_set+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat >conftest.make <<\_ACEOF +SHELL = /bin/sh +all: + @echo '@@@%%%=$(MAKE)=@@@%%%' +_ACEOF +# GNU make sometimes prints "make[1]: Entering ...", which would confuse us. 
+case `${MAKE-make} -f conftest.make 2>/dev/null` in + *@@@%%%=?*=@@@%%%*) + eval ac_cv_prog_make_${ac_make}_set=yes;; + *) + eval ac_cv_prog_make_${ac_make}_set=no;; +esac +rm -f conftest.make +fi +if eval test \$ac_cv_prog_make_${ac_make}_set = yes; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + SET_MAKE= +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + SET_MAKE="MAKE=${MAKE-make}" +fi + +rm -rf .tst 2>/dev/null +mkdir .tst 2>/dev/null +if test -d .tst; then + am__leading_dot=. +else + am__leading_dot=_ +fi +rmdir .tst 2>/dev/null + +# Check whether --enable-silent-rules was given. +if test "${enable_silent_rules+set}" = set; then : + enableval=$enable_silent_rules; +fi + +case $enable_silent_rules in # ((( + yes) AM_DEFAULT_VERBOSITY=0;; + no) AM_DEFAULT_VERBOSITY=1;; + *) AM_DEFAULT_VERBOSITY=1;; +esac +am_make=${MAKE-make} +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $am_make supports nested variables" >&5 +$as_echo_n "checking whether $am_make supports nested variables... " >&6; } +if ${am_cv_make_support_nested_variables+:} false; then : + $as_echo_n "(cached) " >&6 +else + if $as_echo 'TRUE=$(BAR$(V)) +BAR0=false +BAR1=true +V=1 +am__doit: + @$(TRUE) +.PHONY: am__doit' | $am_make -f - >/dev/null 2>&1; then + am_cv_make_support_nested_variables=yes +else + am_cv_make_support_nested_variables=no +fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_make_support_nested_variables" >&5 +$as_echo "$am_cv_make_support_nested_variables" >&6; } +if test $am_cv_make_support_nested_variables = yes; then + AM_V='$(V)' + AM_DEFAULT_V='$(AM_DEFAULT_VERBOSITY)' +else + AM_V=$AM_DEFAULT_VERBOSITY + AM_DEFAULT_V=$AM_DEFAULT_VERBOSITY +fi +AM_BACKSLASH='\' + +if test "`cd $srcdir && pwd`" != "`pwd`"; then + # Use -I$(srcdir) only when $(srcdir) != ., so that make's output + # is not polluted with repeated "-I." 
+ am__isrc=' -I$(srcdir)' + # test to see if srcdir already configured + if test -f $srcdir/config.status; then + as_fn_error $? "source directory already configured; run \"make distclean\" there first" "$LINENO" 5 + fi +fi + +# test whether we have cygpath +if test -z "$CYGPATH_W"; then + if (cygpath --version) >/dev/null 2>/dev/null; then + CYGPATH_W='cygpath -w' + else + CYGPATH_W=echo + fi +fi + + +# Define the identity of the package. + PACKAGE='trafficserver' + VERSION='5.0.1' + + +cat >>confdefs.h <<_ACEOF +#define PACKAGE "$PACKAGE" +_ACEOF + + +cat >>confdefs.h <<_ACEOF +#define VERSION "$VERSION" +_ACEOF + +# Some tools Automake needs. + +ACLOCAL=${ACLOCAL-"${am_missing_run}aclocal-${am__api_version}"} + + +AUTOCONF=${AUTOCONF-"${am_missing_run}autoconf"} + + +AUTOMAKE=${AUTOMAKE-"${am_missing_run}automake-${am__api_version}"} + + +AUTOHEADER=${AUTOHEADER-"${am_missing_run}autoheader"} + + +MAKEINFO=${MAKEINFO-"${am_missing_run}makeinfo"} + +# For better backward compatibility. To be removed once Automake 1.9.x +# dies out for good. For more background, see: +# +# +mkdir_p='$(MKDIR_P)' + +# We need awk for the "check" target. The system "awk" is bad on +# some platforms. +# Always define AMTAR for backward compatibility. Yes, it's still used +# in the wild :-( We should find a proper way to deprecate it ... +AMTAR='$${TAR-tar}' + + +# We'll loop over all known methods to create a tar archive until one works. +_am_tools='gnutar plaintar pax cpio none' + +# The POSIX 1988 'ustar' format is defined with fixed-size fields. + # There is notably a 21 bits limit for the UID and the GID. In fact, + # the 'pax' utility can hang on bigger UID/GID (see automake bug#8343 + # and bug#13588). + am_max_uid=2097151 # 2^21 - 1 + am_max_gid=$am_max_uid + # The $UID and $GID variables are not portable, so we need to resort + # to the POSIX-mandated id(1) utility. 
Errors in the 'id' calls + # below are definitely unexpected, so allow the users to see them + # (that is, avoid stderr redirection). + am_uid=`id -u || echo unknown` + am_gid=`id -g || echo unknown` + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether UID '$am_uid' is supported by ustar format" >&5 +$as_echo_n "checking whether UID '$am_uid' is supported by ustar format... " >&6; } + if test $am_uid -le $am_max_uid; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + _am_tools=none + fi + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether GID '$am_gid' is supported by ustar format" >&5 +$as_echo_n "checking whether GID '$am_gid' is supported by ustar format... " >&6; } + if test $am_gid -le $am_max_gid; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + _am_tools=none + fi + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to create a ustar tar archive" >&5 +$as_echo_n "checking how to create a ustar tar archive... " >&6; } + + # Go ahead even if we have the value already cached. We do so because we + # need to set the values for the 'am__tar' and 'am__untar' variables. + _am_tools=${am_cv_prog_tar_ustar-$_am_tools} + + for _am_tool in $_am_tools; do + case $_am_tool in + gnutar) + for _am_tar in tar gnutar gtar; do + { echo "$as_me:$LINENO: $_am_tar --version" >&5 + ($_am_tar --version) >&5 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && break + done + am__tar="$_am_tar --format=ustar -chf - "'"$$tardir"' + am__tar_="$_am_tar --format=ustar -chf - "'"$tardir"' + am__untar="$_am_tar -xf -" + ;; + plaintar) + # Must skip GNU tar: if it does not support --format= it doesn't create + # ustar tarball either. 
+ (tar --version) >/dev/null 2>&1 && continue + am__tar='tar chf - "$$tardir"' + am__tar_='tar chf - "$tardir"' + am__untar='tar xf -' + ;; + pax) + am__tar='pax -L -x ustar -w "$$tardir"' + am__tar_='pax -L -x ustar -w "$tardir"' + am__untar='pax -r' + ;; + cpio) + am__tar='find "$$tardir" -print | cpio -o -H ustar -L' + am__tar_='find "$tardir" -print | cpio -o -H ustar -L' + am__untar='cpio -i -H ustar -d' + ;; + none) + am__tar=false + am__tar_=false + am__untar=false + ;; + esac + + # If the value was cached, stop now. We just wanted to have am__tar + # and am__untar set. + test -n "${am_cv_prog_tar_ustar}" && break + + # tar/untar a dummy directory, and stop if the command works. + rm -rf conftest.dir + mkdir conftest.dir + echo GrepMe > conftest.dir/file + { echo "$as_me:$LINENO: tardir=conftest.dir && eval $am__tar_ >conftest.tar" >&5 + (tardir=conftest.dir && eval $am__tar_ >conftest.tar) >&5 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } + rm -rf conftest.dir + if test -s conftest.tar; then + { echo "$as_me:$LINENO: $am__untar &5 + ($am__untar &5 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } + { echo "$as_me:$LINENO: cat conftest.dir/file" >&5 + (cat conftest.dir/file) >&5 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } + grep GrepMe conftest.dir/file >/dev/null 2>&1 && break + fi + done + rm -rf conftest.dir + + if ${am_cv_prog_tar_ustar+:} false; then : + $as_echo_n "(cached) " >&6 +else + am_cv_prog_tar_ustar=$_am_tool +fi + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_prog_tar_ustar" >&5 +$as_echo "$am_cv_prog_tar_ustar" >&6; } + + + + + + +ac_config_headers="$ac_config_headers lib/ts/ink_autoconf.h" + + +# Configure with --disable-silent-rules to get verbose output. 
For more info, see +# http://www.gnu.org/software/automake/manual/html_node/Automake-silent_002drules-Option.html +# Check whether --enable-silent-rules was given. +if test "${enable_silent_rules+set}" = set; then : + enableval=$enable_silent_rules; +fi + +case $enable_silent_rules in # ((( + yes) AM_DEFAULT_VERBOSITY=0;; + no) AM_DEFAULT_VERBOSITY=1;; + *) AM_DEFAULT_VERBOSITY=0;; +esac +am_make=${MAKE-make} +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $am_make supports nested variables" >&5 +$as_echo_n "checking whether $am_make supports nested variables... " >&6; } +if ${am_cv_make_support_nested_variables+:} false; then : + $as_echo_n "(cached) " >&6 +else + if $as_echo 'TRUE=$(BAR$(V)) +BAR0=false +BAR1=true +V=1 +am__doit: + @$(TRUE) +.PHONY: am__doit' | $am_make -f - >/dev/null 2>&1; then + am_cv_make_support_nested_variables=yes +else + am_cv_make_support_nested_variables=no +fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_make_support_nested_variables" >&5 +$as_echo "$am_cv_make_support_nested_variables" >&6; } +if test $am_cv_make_support_nested_variables = yes; then + AM_V='$(V)' + AM_DEFAULT_V='$(AM_DEFAULT_VERBOSITY)' +else + AM_V=$AM_DEFAULT_VERBOSITY + AM_DEFAULT_V=$AM_DEFAULT_VERBOSITY +fi +AM_BACKSLASH='\' + + +# Check whether --enable-static was given. +if test "${enable_static+set}" = set; then : + enableval=$enable_static; p=${PACKAGE-default} + case $enableval in + yes) enable_static=yes ;; + no) enable_static=no ;; + *) + enable_static=no + # Look at the argument we got. We use all the common list separators. + lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for pkg in $enableval; do + IFS="$lt_save_ifs" + if test "X$pkg" = "X$p"; then + enable_static=yes + fi + done + IFS="$lt_save_ifs" + ;; + esac +else + enable_static=no +fi + + + + + + + + + + +# Libtool versioning uses different conventions on different +# platforms. 
At least on FreeBSD, libtool uses an overly complex +# convention that attempts to solve problems that most people just +# don't have and which just causes confusion for most end users. +# +TS_VERSION_MAJOR=$((5000001 / 1000000 )) +TS_VERSION_MINOR=$(((5000001 / 1000) % 1000 )) +TS_VERSION_MICRO=$((5000001 % 1000 )) +TS_LIBTOOL_MAJOR=`echo $((${TS_VERSION_MAJOR} + ${TS_VERSION_MINOR}))` +TS_LIBTOOL_VERSION=$TS_LIBTOOL_MAJOR:$TS_VERSION_MICRO:$TS_VERSION_MINOR +TS_VERSION_STRING=5.0.1 +TS_VERSION_NUMBER=5000001 + +# +# Substitute the above version numbers into the various files below. +# + + + + + + + + + + + +# +# Generate ./config.nice for reproducing runs of configure +# + + rm -f config.nice + cat >config.nice<> config.nice + fi + if test -n "$CXX"; then + echo "CXX=\"$CXX\"; export CXX" >> config.nice + fi + if test -n "$CFLAGS"; then + echo "CFLAGS=\"$CFLAGS\"; export CFLAGS" >> config.nice + fi + if test -n "$CXXFLAGS"; then + echo "CXXFLAGS=\"$CXXFLAGS\"; export CXXFLAGS" >> config.nice + fi + if test -n "$CPPFLAGS"; then + echo "CPPFLAGS=\"$CPPFLAGS\"; export CPPFLAGS" >> config.nice + fi + if test -n "$LDFLAGS"; then + echo "LDFLAGS=\"$LDFLAGS\"; export LDFLAGS" >> config.nice + fi + if test -n "$LTFLAGS"; then + echo "LTFLAGS=\"$LTFLAGS\"; export LTFLAGS" >> config.nice + fi + if test -n "$LIBS"; then + echo "LIBS=\"$LIBS\"; export LIBS" >> config.nice + fi + if test -n "$INCLUDES"; then + echo "INCLUDES=\"$INCLUDES\"; export INCLUDES" >> config.nice + fi + if test -n "$NOTEST_CFLAGS"; then + echo "NOTEST_CFLAGS=\"$NOTEST_CFLAGS\"; export NOTEST_CFLAGS" >> config.nice + fi + if test -n "$NOTEST_CXXFLAGS"; then + echo "NOTEST_CXXFLAGS=\"$NOTEST_CXXFLAGS\"; export NOTEST_CXXFLAGS" >> config.nice + fi + if test -n "$NOTEST_CPPFLAGS"; then + echo "NOTEST_CPPFLAGS=\"$NOTEST_CPPFLAGS\"; export NOTEST_CPPFLAGS" >> config.nice + fi + if test -n "$NOTEST_LDFLAGS"; then + echo "NOTEST_LDFLAGS=\"$NOTEST_LDFLAGS\"; export NOTEST_LDFLAGS" >> config.nice + fi + if test 
-n "$NOTEST_LIBS"; then + echo "NOTEST_LIBS=\"$NOTEST_LIBS\"; export NOTEST_LIBS" >> config.nice + fi + + # Retrieve command-line arguments. + eval "set x $0 $ac_configure_args" + shift + + for arg + do + +ats_last= +ats_cur="$arg" +while test "x${ats_cur}" != "x${ats_last}"; +do + ats_last="${ats_cur}" + ats_cur=`eval "echo ${ats_cur}"` +done +arg="${ats_cur}" + + echo "\"$arg\" \\" >> config.nice + done + echo '"$@"' >> config.nice + chmod +x config.nice + + +# XXX we can't just use AC_PREFIX_DEFAULT because that isn't subbed in +# by configure until it is too late. Is that how it should be or not? +# Something seems broken here. + + +# Get the layout here, so we can pass the required variables to Trafficserver + +# Check whether --enable-layout was given. +if test "${enable_layout+set}" = set; then : + enableval=$enable_layout; + LAYOUT=$enableval + +fi + + +if test -z "$LAYOUT"; then + LAYOUT="TrafficServer" +fi + + if test ! -f $srcdir/config.layout; then + echo "** Error: Layout file $srcdir/config.layout not found" + echo "** Error: Cannot use undefined layout '$LAYOUT'" + exit 1 + fi + # Catch layout names including a slash which will otherwise + # confuse the heck out of the sed script. + case $LAYOUT in + */*) + echo "** Error: $LAYOUT is not a valid layout name" + exit 1 ;; + esac + pldconf=./config.pld + + sed -e "1s/[ ]*<[lL]ayout[ ]*$LAYOUT[ ]*>[ ]*//;1t" \ + -e "1,/[ ]*<[lL]ayout[ ]*$LAYOUT[ ]*>[ ]*/d" \ + -e '/[ ]*<\/Layout>[ ]*/,$d' \ + -e "s/^[ ]*//g" \ + -e "s/:[ ]*/=\'/g" \ + -e "s/[ ]*$/'/g" \ + $srcdir/config.layout > $pldconf + layout_name=$LAYOUT + if test ! -s $pldconf; then + echo "** Error: unable to find layout $layout_name" + exit 1 + fi + . 
$pldconf + rm $pldconf + for var in prefix exec_prefix bindir sbindir libexecdir mandir infodir \ + sysconfdir datadir includedir localstatedir runtimedir \ + logdir libdir installbuilddir libsuffix cachedir docdir; do + eval "val=\"\$$var\"" + case $val in + *+) + val=`echo $val | sed -e 's;\+$;;'` + eval "$var=\"\$val\"" + autosuffix=yes + ;; + *) + autosuffix=no + ;; + esac + val=`echo $val | sed -e 's:\(.\)/*$:\1:'` + val=`echo $val | sed -e 's:[\$]\([a-z_]*\):$\1:g'` + if test "$autosuffix" = "yes"; then + if echo $val | grep -i '/trafficserver$' >/dev/null; then + addtarget=no + else + addtarget=yes + fi + if test "$addtarget" = "yes"; then + val="$val/trafficserver" + fi + fi + eval "$var='$val'" + done + for var in bindir sbindir libexecdir mandir infodir sysconfdir \ + datadir localstatedir runtimedir logdir libdir cachedir docdir; do + eval "val=\"\$$var\"" + case $val in + *+) + val=`echo $val | sed -e 's;\+$;;'` + eval "$var=\"\$val\"" + autosuffix=yes + ;; + *) + autosuffix=no + ;; + esac + org_val= + exp_val="$val" + while test "x$exp_val" != "x$org_val"; + do + org_val="$exp_val" + exp_val="`eval \"echo $exp_val\"`" + done + if echo $exp_val | grep -i '/trafficserver$' >/dev/null; then + addtarget=no + else + addtarget=yes + fi + if test "$addsuffix" = "yes" -a "$addtarget" = "yes"; then + val="$val/trafficserver" + fi + var="pkg$var" + eval "$var='$val'" + done + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for chosen layout" >&5 +$as_echo_n "checking for chosen layout... " >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $layout_name" >&5 +$as_echo "$layout_name" >&6; } + + +# Reparse the configure arguments so we can override the layout. + +ac_prev= +# Retrieve the command-line arguments. The eval is needed because +# the arguments are quoted to preserve accuracy. +eval "set x $ac_configure_args" +shift +for ac_option +do +# If the previous option needs an argument, assign it. 
+ if test -n "$ac_prev"; then + eval "$ac_prev=\$ac_option" + ac_prev= + continue + fi + + ac_optarg=`expr "x$ac_option" : 'x[^=]*=\(.*\)'` + + case $ac_option in + + -bindir | --bindir | --bindi | --bind | --bin | --bi) + ac_prev=bindir ;; + -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*) + bindir="$ac_optarg" + pkgbindir="$ac_optarg" ;; + + -datadir | --datadir | --datadi | --datad | --data | --dat | --da) + ac_prev=datadir ;; + -datadir=* | --datadir=* | --datadi=* | --datad=* | --data=* | --dat=* \ + | --da=*) + datadir="$ac_optarg" + pkgdatadir="$ac_optarg" ;; + + -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \ + | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \ + | --exec | --exe | --ex) + ac_prev=exec_prefix ;; + -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \ + | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \ + | --exec=* | --exe=* | --ex=*) + exec_prefix="$ac_optarg" ;; + + -includedir | --includedir | --includedi | --included | --include \ + | --includ | --inclu | --incl | --inc) + ac_prev=includedir ;; + -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \ + | --includ=* | --inclu=* | --incl=* | --inc=*) + includedir="$ac_optarg" ;; + + -infodir | --infodir | --infodi | --infod | --info | --inf) + ac_prev=infodir ;; + -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*) + infodir="$ac_optarg" ;; + + -libdir | --libdir | --libdi | --libd) + ac_prev=libdir ;; + -libdir=* | --libdir=* | --libdi=* | --libd=*) + libdir="$ac_optarg" + pkglibdir="$ac_optarg" ;; + + -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \ + | --libexe | --libex | --libe) + ac_prev=libexecdir ;; + -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \ + | --libexe=* | --libex=* | --libe=*) + libexecdir="$ac_optarg" + pkglibexecdir="$ac_optarg" ;; + + -localstatedir | --localstatedir | --localstatedi | --localstated 
\ + | --localstate | --localstat | --localsta | --localst \ + | --locals | --local | --loca | --loc | --lo) + ac_prev=localstatedir ;; + -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \ + | --localstate=* | --localstat=* | --localsta=* | --localst=* \ + | --locals=* | --local=* | --loca=* | --loc=* | --lo=*) + localstatedir="$ac_optarg" + pkglocalstatedir="$ac_optarg" ;; + + -mandir | --mandir | --mandi | --mand | --man | --ma | --m) + ac_prev=mandir ;; + -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*) + mandir="$ac_optarg" ;; + + -prefix | --prefix | --prefi | --pref | --pre | --pr | --p) + ac_prev=prefix ;; + -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*) + prefix="$ac_optarg" ;; + + -runtimedir | --runtimedir | --runtimedi | --runtimed | --runtime | --runtim \ + | --runti | --runt | --run | --ru | --r) + ac_prev=runtimedir ;; + -runtimedir=* | --runtimedir=* | --runtimedi=* | --runtimed=* | --runtime=* \ + | --runtim=* | --runti=* | --runt=* | --run=* | --ru=* | --r=*) + ac_prev=runtimedir ;; + + -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb) + ac_prev=sbindir ;; + -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \ + | --sbi=* | --sb=*) + sbindir="$ac_optarg" + pkgsbindir="$ac_optarg" ;; + + -sharedstatedir | --sharedstatedir | --sharedstatedi \ + | --sharedstated | --sharedstate | --sharedstat | --sharedsta \ + | --sharedst | --shareds | --shared | --share | --shar \ + | --sha | --sh) + ac_prev=sharedstatedir ;; + -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \ + | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \ + | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \ + | --sha=* | --sh=*) + sharedstatedir="$ac_optarg" ;; + + -sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \ + | --syscon | --sysco | --sysc | --sys | --sy) + ac_prev=sysconfdir ;; + -sysconfdir=* | --sysconfdir=* | 
--sysconfdi=* | --sysconfd=* | --sysconf=* \ + | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*) + sysconfdir="$ac_optarg" + pkgsysconfdir="$ac_optarg" ;; + + esac +done + +# Be sure to have absolute paths. +for ac_var in exec_prefix prefix +do + eval ac_val=$`echo $ac_var` + case $ac_val in + [\\/$]* | ?:[\\/]* | NONE | '' ) ;; + *) as_fn_error $? "expected an absolute path for --$ac_var: $ac_val" "$LINENO" 5;; + esac +done + + + +# +# Host detection +# +# Make sure we can run config.sub. +$SHELL "$ac_aux_dir/config.sub" sun4 >/dev/null 2>&1 || + as_fn_error $? "cannot run $SHELL $ac_aux_dir/config.sub" "$LINENO" 5 + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking build system type" >&5 +$as_echo_n "checking build system type... " >&6; } +if ${ac_cv_build+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_build_alias=$build_alias +test "x$ac_build_alias" = x && + ac_build_alias=`$SHELL "$ac_aux_dir/config.guess"` +test "x$ac_build_alias" = x && + as_fn_error $? "cannot guess build type; you must specify one" "$LINENO" 5 +ac_cv_build=`$SHELL "$ac_aux_dir/config.sub" $ac_build_alias` || + as_fn_error $? "$SHELL $ac_aux_dir/config.sub $ac_build_alias failed" "$LINENO" 5 + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_build" >&5 +$as_echo "$ac_cv_build" >&6; } +case $ac_cv_build in +*-*-*) ;; +*) as_fn_error $? "invalid value of canonical build" "$LINENO" 5;; +esac +build=$ac_cv_build +ac_save_IFS=$IFS; IFS='-' +set x $ac_cv_build +shift +build_cpu=$1 +build_vendor=$2 +shift; shift +# Remember, the first character of IFS is used to create $*, +# except with old shells: +build_os=$* +IFS=$ac_save_IFS +case $build_os in *\ *) build_os=`echo "$build_os" | sed 's/ /-/g'`;; esac + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking host system type" >&5 +$as_echo_n "checking host system type... 
" >&6; } +if ${ac_cv_host+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test "x$host_alias" = x; then + ac_cv_host=$ac_cv_build +else + ac_cv_host=`$SHELL "$ac_aux_dir/config.sub" $host_alias` || + as_fn_error $? "$SHELL $ac_aux_dir/config.sub $host_alias failed" "$LINENO" 5 +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_host" >&5 +$as_echo "$ac_cv_host" >&6; } +case $ac_cv_host in +*-*-*) ;; +*) as_fn_error $? "invalid value of canonical host" "$LINENO" 5;; +esac +host=$ac_cv_host +ac_save_IFS=$IFS; IFS='-' +set x $ac_cv_host +shift +host_cpu=$1 +host_vendor=$2 +shift; shift +# Remember, the first character of IFS is used to create $*, +# except with old shells: +host_os=$* +IFS=$ac_save_IFS +case $host_os in *\ *) host_os=`echo "$host_os" | sed 's/ /-/g'`;; esac + + +HOST_GUESS="$host" + + +# +# Build environment +# +build_person="`id -nu`" +build_group="`id -ng`" +build_machine="`uname -n`" + + + + + +# Check whether --with-user was given. +if test "${with_user+set}" = set; then : + withval=$with_user; + with_user="$withval" + +else + + with_user="nobody" + + +fi + + +default_group="`id -ng $with_user`" + +# Check whether --with-group was given. +if test "${with_group+set}" = set; then : + withval=$with_group; + with_group="$withval" + +else + + with_group=${default_group:-nobody} + + +fi + +pkgsysuser=$with_user + +pkgsysgroup=$with_group + + +# ----------------------------------------------------------------------------- +# 2. SITE CONFIGURATION + +# +# Debug +# + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to enable debugging" >&5 +$as_echo_n "checking whether to enable debugging... " >&6; } +# Check whether --enable-debug was given. +if test "${enable_debug+set}" = set; then : + enableval=$enable_debug; +else + enable_debug=no + +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_debug" >&5 +$as_echo "$enable_debug" >&6; } + +# +# Enable -Werror. 
We want this enabled by default for developers, but disabled by default +# for end users (because we don't want released versions to suffer from compiler warning hell). +# +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to enable -Werror" >&5 +$as_echo_n "checking whether to enable -Werror... " >&6; } +# Check whether --enable-werror was given. +if test "${enable_werror+set}" = set; then : + enableval=$enable_werror; +else + + case $TS_VERSION_STRING in + *dev) enable_werror=yes ;; + *) enable_werror=no ;; + esac + + +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_werror" >&5 +$as_echo "$enable_werror" >&6; } + + +# +# Fast SDK APIs, this disables the parameter checks (assert) +# on all APIs. +# + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to enable fast SDK APIs" >&5 +$as_echo_n "checking whether to enable fast SDK APIs... " >&6; } +# Check whether --enable-fast-sdk was given. +if test "${enable_fast_sdk+set}" = set; then : + enableval=$enable_fast_sdk; +else + enable_fast_sdk=no + +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_fast_sdk" >&5 +$as_echo "$enable_fast_sdk" >&6; } + + tsl_prefix="use" + tsl_stem="fast_sdk" + eval "tsl_enable=\$enable_${tsl_stem}" + if test "x$tsl_enable" = "xyes"; then : + eval "${tsl_prefix}_${tsl_stem}=1" +else + eval "${tsl_prefix}_${tsl_stem}=0" + +fi + + + +# +# Diags +# + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to enable diags" >&5 +$as_echo_n "checking whether to enable diags... " >&6; } +# Check whether --enable-diags was given. 
+if test "${enable_diags+set}" = set; then : + enableval=$enable_diags; +else + enable_diags=yes + +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_diags" >&5 +$as_echo "$enable_diags" >&6; } + + tsl_prefix="use" + tsl_stem="diags" + eval "tsl_enable=\$enable_${tsl_stem}" + if test "x$tsl_enable" = "xyes"; then : + eval "${tsl_prefix}_${tsl_stem}=1" +else + eval "${tsl_prefix}_${tsl_stem}=0" + +fi + + + +# +# Build regression tests? +# + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to enable regression tests" >&5 +$as_echo_n "checking whether to enable regression tests... " >&6; } +# Check whether --enable-tests was given. +if test "${enable_tests+set}" = set; then : + enableval=$enable_tests; +else + enable_tests=yes + +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_tests" >&5 +$as_echo "$enable_tests" >&6; } + + tsl_prefix="has" + tsl_stem="tests" + eval "tsl_enable=\$enable_${tsl_stem}" + if test "x$tsl_enable" = "xyes"; then : + eval "${tsl_prefix}_${tsl_stem}=1" +else + eval "${tsl_prefix}_${tsl_stem}=0" + +fi + + + if test 0 -ne $has_tests; then + BUILD_TESTS_TRUE= + BUILD_TESTS_FALSE='#' +else + BUILD_TESTS_TRUE='#' + BUILD_TESTS_FALSE= +fi + + +ink_with_modules_local="-DLOCAL_MANAGER" + +ink_with_modules_process="-DPROCESS_MANAGER" + + +# +# Force some static linkage (for testing / development only) +# +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to build some static libts (dev only)" >&5 +$as_echo_n "checking whether to build some static libts (dev only)... " >&6; } +# Check whether --enable-static-libts was given. 
+if test "${enable_static_libts+set}" = set; then : + enableval=$enable_static_libts; +else + enable_static_libts=no + +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_static_libts" >&5 +$as_echo "$enable_static_libts" >&6; } + + tsl_prefix="has" + tsl_stem="static_libts" + eval "tsl_enable=\$enable_${tsl_stem}" + if test "x$tsl_enable" = "xyes"; then : + eval "${tsl_prefix}_${tsl_stem}=1" +else + eval "${tsl_prefix}_${tsl_stem}=0" + +fi + + if test 0 -ne $has_static_libts; then + STATIC_LIBTS_TRUE= + STATIC_LIBTS_FALSE='#' +else + STATIC_LIBTS_TRUE='#' + STATIC_LIBTS_FALSE= +fi + + +# +# Remote Coverity Prevent commit +# +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to commit cov defects to remote host" >&5 +$as_echo_n "checking whether to commit cov defects to remote host... " >&6; } +# Check whether --enable-remote-cov-commit was given. +if test "${enable_remote_cov_commit+set}" = set; then : + enableval=$enable_remote_cov_commit; +else + enable_remote_cov_commit=localhost + +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_remote_cov_commit" >&5 +$as_echo "$enable_remote_cov_commit" >&6; } + + +# +# WCCP +# +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to enable WCCP v2 support" >&5 +$as_echo_n "checking whether to enable WCCP v2 support... " >&6; } +# Check whether --enable-wccp was given. 
+if test "${enable_wccp+set}" = set; then : + enableval=$enable_wccp; +else + enable_wccp=no + +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_wccp" >&5 +$as_echo "$enable_wccp" >&6; } + + tsl_prefix="has" + tsl_stem="wccp" + eval "tsl_enable=\$enable_${tsl_stem}" + if test "x$tsl_enable" = "xyes"; then : + eval "${tsl_prefix}_${tsl_stem}=1" +else + eval "${tsl_prefix}_${tsl_stem}=0" + +fi + + + if test 0 -ne $has_wccp; then + BUILD_WCCP_TRUE= + BUILD_WCCP_FALSE='#' +else + BUILD_WCCP_TRUE='#' + BUILD_WCCP_FALSE= +fi + + +# +# InterimCache +# +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to enable interim cache support" >&5 +$as_echo_n "checking whether to enable interim cache support... " >&6; } +# Check whether --enable-interim_cache was given. +if test "${enable_interim_cache+set}" = set; then : + enableval=$enable_interim_cache; +else + enable_interim_cache=no + +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_interim_cache" >&5 +$as_echo "$enable_interim_cache" >&6; } + + tsl_prefix="has" + tsl_stem="interim_cache" + eval "tsl_enable=\$enable_${tsl_stem}" + if test "x$tsl_enable" = "xyes"; then : + eval "${tsl_prefix}_${tsl_stem}=1" +else + eval "${tsl_prefix}_${tsl_stem}=0" + +fi + + + if test 0 -ne $has_interim_cache; then + BUILD_INTERIM_CACHE_TRUE= + BUILD_INTERIM_CACHE_FALSE='#' +else + BUILD_INTERIM_CACHE_TRUE='#' + BUILD_INTERIM_CACHE_FALSE= +fi + + +# Google profiler +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to enable profiler" >&5 +$as_echo_n "checking whether to enable profiler... " >&6; } + +# Check whether --with-profiler was given. +if test "${with_profiler+set}" = set; then : + withval=$with_profiler; with_profiler=$withval +else + with_profiler=no + +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $with_profiler" >&5 +$as_echo "$with_profiler" >&6; } + +# +# use eventfd() or pipes +# Found that ec2 is using an older kernel causing eventfd errors. 
+# Disable eventfd when using ATS on EC2 Fedora. +# +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to enable eventfd()" >&5 +$as_echo_n "checking whether to enable eventfd()... " >&6; } +# Check whether --enable-eventfd was given. +if test "${enable_eventfd+set}" = set; then : + enableval=$enable_eventfd; +else + enable_eventfd="yes" + +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_eventfd" >&5 +$as_echo "$enable_eventfd" >&6; } + +# +# use POSIX capabilities instead of user ID switching. +# +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to use POSIX capabilities" >&5 +$as_echo_n "checking whether to use POSIX capabilities... " >&6; } +# Check whether --enable-posix-cap was given. +if test "${enable_posix_cap+set}" = set; then : + enableval=$enable_posix_cap; +else + enable_posix_cap="auto" + +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_posix_cap" >&5 +$as_echo "$enable_posix_cap" >&6; } + +# +# use hwloc library when possible (can be disabled) +# +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to use hwloc library" >&5 +$as_echo_n "checking whether to use hwloc library... " >&6; } +# Check whether --enable-hwloc was given. +if test "${enable_hwloc+set}" = set; then : + enableval=$enable_hwloc; +else + enable_hwloc="yes" + +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_hwloc" >&5 +$as_echo "$enable_hwloc" >&6; } + +# +# Enble ccache explicitly (it's disabled by default, because of build problems in some cases) +# +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to enable ccache" >&5 +$as_echo_n "checking whether to enable ccache... " >&6; } +# Check whether --enable-ccache was given. +if test "${enable_ccache+set}" = set; then : + enableval=$enable_ccache; +else + enable_ccache="no" + +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_ccache" >&5 +$as_echo "$enable_ccache" >&6; } + +# +# Use TPROXY for connection transparency. 
+# +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to enable TPROXY based transparency" >&5 +$as_echo_n "checking whether to enable TPROXY based transparency... " >&6; } +# Check whether --enable-tproxy was given. +if test "${enable_tproxy+set}" = set; then : + enableval=$enable_tproxy; +else + enable_tproxy="auto" + +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_tproxy" >&5 +$as_echo "$enable_tproxy" >&6; } + +# +# Disable our freelist implementation, reverting it to whatever +# allocator (malloc, tcmalloc or jemalloc) that is in use. This is +# useful for debugging. +# +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to disable freelist" >&5 +$as_echo_n "checking whether to disable freelist... " >&6; } +# Check whether --enable-freelist was given. +if test "${enable_freelist+set}" = set; then : + enableval=$enable_freelist; +else + enable_freelist="yes" + +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_freelist" >&5 +$as_echo "$enable_freelist" >&6; } + + tsl_prefix="use" + tsl_stem="freelist" + eval "tsl_enable=\$enable_${tsl_stem}" + if test "x$tsl_enable" = "xyes"; then : + eval "${tsl_prefix}_${tsl_stem}=1" +else + eval "${tsl_prefix}_${tsl_stem}=0" + +fi + + + +# +# The original InkFreeList memory pool could not reclaim itself +# automatically. We can use '--enable-reclaimable-freelist' option +# to enable the reclaiming feature. This option is effective only +# when freelist is enable. +# +if test "x${enable_freelist}" = "xyes"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to enable reclaimable freelist" >&5 +$as_echo_n "checking whether to enable reclaimable freelist... " >&6; } + # Check whether --enable-reclaimable-freelist was given. 
+if test "${enable_reclaimable_freelist+set}" = set; then : + enableval=$enable_reclaimable_freelist; +else + enable_reclaimable_freelist="no" +fi + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_reclaimable_freelist" >&5 +$as_echo "$enable_reclaimable_freelist" >&6; } +else + enable_reclaimable_freelist="no" +fi + + tsl_prefix="use" + tsl_stem="reclaimable_freelist" + eval "tsl_enable=\$enable_${tsl_stem}" + if test "x$tsl_enable" = "xyes"; then : + eval "${tsl_prefix}_${tsl_stem}=1" +else + eval "${tsl_prefix}_${tsl_stem}=0" + +fi + + + +# +# Options for SPDY +# +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to enable spdy" >&5 +$as_echo_n "checking whether to enable spdy... " >&6; } +# Check whether --enable-spdy was given. +if test "${enable_spdy+set}" = set; then : + enableval=$enable_spdy; +else + enable_spdy="no" +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_spdy" >&5 +$as_echo "$enable_spdy" >&6; } + + tsl_prefix="has" + tsl_stem="spdy" + eval "tsl_enable=\$enable_${tsl_stem}" + if test "x$tsl_enable" = "xyes"; then : + eval "${tsl_prefix}_${tsl_stem}=1" +else + eval "${tsl_prefix}_${tsl_stem}=0" + +fi + + + if test 0 -ne $has_spdy; then + BUILD_SPDY_TRUE= + BUILD_SPDY_FALSE='#' +else + BUILD_SPDY_TRUE='#' + BUILD_SPDY_FALSE= +fi + + +# +# Configure how many stats to allocate for plugins. Default is 512. +# + +# Check whether --with-max-api-stats was given. +if test "${with_max_api_stats+set}" = set; then : + withval=$with_max_api_stats; max_api_stats=$withval +else + max_api_stats=512 + +fi + + + +# +# Max host name length that we deal with in URLs. +# + +# Check whether --with-max-host-name-len was given. +if test "${with_max_host_name_len+set}" = set; then : + withval=$with_max_host_name_len; max_host_name_len=$withval +else + max_host_name_len=256 + +fi + + + +# +# EventProcessor thread configurations +# + + +# Check whether --with-max-event-threads was given. 
+if test "${with_max_event_threads+set}" = set; then : + withval=$with_max_event_threads; max_event_threads=$withval +else + max_event_threads=4096 + +fi + + + + +# Check whether --with-max-threads-per-type was given. +if test "${with_max_threads_per_type+set}" = set; then : + withval=$with_max_threads_per_type; max_threads_per_type=$withval +else + max_threads_per_type=3072 + +fi + + + +# +# Experimental plugins +# + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to enable experimental plugins" >&5 +$as_echo_n "checking whether to enable experimental plugins... " >&6; } +# Check whether --enable-experimental-plugins was given. +if test "${enable_experimental_plugins+set}" = set; then : + enableval=$enable_experimental_plugins; +else + enable_experimental_plugins=no + +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_experimental_plugins" >&5 +$as_echo "$enable_experimental_plugins" >&6; } + if test "x${enable_experimental_plugins}" = "xyes" ; then + BUILD_EXPERIMENTAL_PLUGINS_TRUE= + BUILD_EXPERIMENTAL_PLUGINS_FALSE='#' +else + BUILD_EXPERIMENTAL_PLUGINS_TRUE='#' + BUILD_EXPERIMENTAL_PLUGINS_FALSE= +fi + + +# +# Example plugins. The example plugins are always built, but not always installed. Installing +# them is useful for QA, but not useful for most users, so we default this to disabled. +# + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to install example plugins" >&5 +$as_echo_n "checking whether to install example plugins... " >&6; } +# Check whether --enable-example-plugins was given. 
+if test "${enable_example_plugins+set}" = set; then : + enableval=$enable_example_plugins; +else + enable_example_plugins=no + +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_example_plugins" >&5 +$as_echo "$enable_example_plugins" >&6; } + if test "x${enable_example_plugins}" = "xyes" ; then + BUILD_EXAMPLE_PLUGINS_TRUE= + BUILD_EXAMPLE_PLUGINS_FALSE='#' +else + BUILD_EXAMPLE_PLUGINS_TRUE='#' + BUILD_EXAMPLE_PLUGINS_FALSE= +fi + + + +# +# Test tools. The test tools are always built, but not always installed. Installing +# them is useful for QA, but not useful for most users, so we default this to disabled. +# + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to install testing tools" >&5 +$as_echo_n "checking whether to install testing tools... " >&6; } +# Check whether --enable-test-tools was given. +if test "${enable_test_tools+set}" = set; then : + enableval=$enable_test_tools; +else + enable_test_tools=no + +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_test_tools" >&5 +$as_echo "$enable_test_tools" >&6; } + if test "x${enable_test_tools}" = "xyes" ; then + BUILD_TEST_TOOLS_TRUE= + BUILD_TEST_TOOLS_FALSE='#' +else + BUILD_TEST_TOOLS_TRUE='#' + BUILD_TEST_TOOLS_FALSE= +fi + + +# +# build c++ api +# +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to build c++ api" >&5 +$as_echo_n "checking whether to build c++ api... " >&6; } +# Check whether --enable-cppapi was given. +if test "${enable_cppapi+set}" = set; then : + enableval=$enable_cppapi; +else + enable_cppapi="no" + +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_cppapi" >&5 +$as_echo "$enable_cppapi" >&6; } + if test "x${enable_cppapi}" = "xyes" ; then + ENABLE_CPPAPI_TRUE= + ENABLE_CPPAPI_FALSE='#' +else + ENABLE_CPPAPI_TRUE='#' + ENABLE_CPPAPI_FALSE= +fi + + +# +# Disable LuaJIT? This is a interrim option, when Lua becomes part of the core +# requirements, we will remove this option. 
+# +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to disable LuaJIT" >&5 +$as_echo_n "checking whether to disable LuaJIT... " >&6; } +# Check whether --enable-luajit was given. +if test "${enable_luajit+set}" = set; then : + enableval=$enable_luajit; +else + enable_luajit="yes" + +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_luajit" >&5 +$as_echo "$enable_luajit" >&6; } + if test "x${enable_luajit}" = "xyes" ; then + BUILD_LUAJIT_TRUE= + BUILD_LUAJIT_FALSE='#' +else + BUILD_LUAJIT_TRUE='#' + BUILD_LUAJIT_FALSE= +fi + + +# +# Installation directories +# For each var the following is evaluated +# foo Standard variable eg. ${prefix}/foo +# rel_foo Relative to prefix eg. foo +# + + +ats_last= +ats_cur="$prefix" +while test "x${ats_cur}" != "x${ats_last}"; +do + ats_last="${ats_cur}" + ats_cur=`eval "echo ${ats_cur}"` +done +exp_prefix="${ats_cur}" + + +ats_stripped=`echo $exp_prefix | sed -e "s#^${prefix}##"` +# check if the stripping was successful +if test "x$exp_prefix" != "x${ats_stripped}"; then +# it was, so strip of any leading slashes + rel_prefix="`echo ${ats_stripped} | sed -e 's#^/*##'`" +else +# it wasn't so return the original + rel_prefix="$exp_prefix" +fi + + + TS_VAR_SUBST="$TS_VAR_SUBST exp_prefix" + + + + TS_VAR_SUBST="$TS_VAR_SUBST rel_prefix" + + + + TS_VAR_SUBST="$TS_VAR_SUBST prefix" + + + + + +ats_last= +ats_cur="$exec_prefix" +while test "x${ats_cur}" != "x${ats_last}"; +do + ats_last="${ats_cur}" + ats_cur=`eval "echo ${ats_cur}"` +done +exp_exec_prefix="${ats_cur}" + + +ats_stripped=`echo $exp_exec_prefix | sed -e "s#^${prefix}##"` +# check if the stripping was successful +if test "x$exp_exec_prefix" != "x${ats_stripped}"; then +# it was, so strip of any leading slashes + rel_exec_prefix="`echo ${ats_stripped} | sed -e 's#^/*##'`" +else +# it wasn't so return the original + rel_exec_prefix="$exp_exec_prefix" +fi + + + TS_VAR_SUBST="$TS_VAR_SUBST exp_exec_prefix" + + + + TS_VAR_SUBST="$TS_VAR_SUBST rel_exec_prefix" 
+ + + + TS_VAR_SUBST="$TS_VAR_SUBST exec_prefix" + + + + + +ats_last= +ats_cur="$bindir" +while test "x${ats_cur}" != "x${ats_last}"; +do + ats_last="${ats_cur}" + ats_cur=`eval "echo ${ats_cur}"` +done +exp_bindir="${ats_cur}" + + +ats_stripped=`echo $exp_bindir | sed -e "s#^${prefix}##"` +# check if the stripping was successful +if test "x$exp_bindir" != "x${ats_stripped}"; then +# it was, so strip of any leading slashes + rel_bindir="`echo ${ats_stripped} | sed -e 's#^/*##'`" +else +# it wasn't so return the original + rel_bindir="$exp_bindir" +fi + + + TS_VAR_SUBST="$TS_VAR_SUBST exp_bindir" + + + + TS_VAR_SUBST="$TS_VAR_SUBST rel_bindir" + + + + TS_VAR_SUBST="$TS_VAR_SUBST bindir" + + + + + +ats_last= +ats_cur="$sbindir" +while test "x${ats_cur}" != "x${ats_last}"; +do + ats_last="${ats_cur}" + ats_cur=`eval "echo ${ats_cur}"` +done +exp_sbindir="${ats_cur}" + + +ats_stripped=`echo $exp_sbindir | sed -e "s#^${prefix}##"` +# check if the stripping was successful +if test "x$exp_sbindir" != "x${ats_stripped}"; then +# it was, so strip of any leading slashes + rel_sbindir="`echo ${ats_stripped} | sed -e 's#^/*##'`" +else +# it wasn't so return the original + rel_sbindir="$exp_sbindir" +fi + + + TS_VAR_SUBST="$TS_VAR_SUBST exp_sbindir" + + + + TS_VAR_SUBST="$TS_VAR_SUBST rel_sbindir" + + + + TS_VAR_SUBST="$TS_VAR_SUBST sbindir" + + + + + +ats_last= +ats_cur="$libdir" +while test "x${ats_cur}" != "x${ats_last}"; +do + ats_last="${ats_cur}" + ats_cur=`eval "echo ${ats_cur}"` +done +exp_libdir="${ats_cur}" + + +ats_stripped=`echo $exp_libdir | sed -e "s#^${prefix}##"` +# check if the stripping was successful +if test "x$exp_libdir" != "x${ats_stripped}"; then +# it was, so strip of any leading slashes + rel_libdir="`echo ${ats_stripped} | sed -e 's#^/*##'`" +else +# it wasn't so return the original + rel_libdir="$exp_libdir" +fi + + + TS_VAR_SUBST="$TS_VAR_SUBST exp_libdir" + + + + TS_VAR_SUBST="$TS_VAR_SUBST rel_libdir" + + + + TS_VAR_SUBST="$TS_VAR_SUBST libdir" + 
+ + + + +ats_last= +ats_cur="$libexecdir" +while test "x${ats_cur}" != "x${ats_last}"; +do + ats_last="${ats_cur}" + ats_cur=`eval "echo ${ats_cur}"` +done +exp_libexecdir="${ats_cur}" + + +ats_stripped=`echo $exp_libexecdir | sed -e "s#^${prefix}##"` +# check if the stripping was successful +if test "x$exp_libexecdir" != "x${ats_stripped}"; then +# it was, so strip of any leading slashes + rel_libexecdir="`echo ${ats_stripped} | sed -e 's#^/*##'`" +else +# it wasn't so return the original + rel_libexecdir="$exp_libexecdir" +fi + + + TS_VAR_SUBST="$TS_VAR_SUBST exp_libexecdir" + + + + TS_VAR_SUBST="$TS_VAR_SUBST rel_libexecdir" + + + + TS_VAR_SUBST="$TS_VAR_SUBST libexecdir" + + + + + +ats_last= +ats_cur="$infodir" +while test "x${ats_cur}" != "x${ats_last}"; +do + ats_last="${ats_cur}" + ats_cur=`eval "echo ${ats_cur}"` +done +exp_infodir="${ats_cur}" + + +ats_stripped=`echo $exp_infodir | sed -e "s#^${prefix}##"` +# check if the stripping was successful +if test "x$exp_infodir" != "x${ats_stripped}"; then +# it was, so strip of any leading slashes + rel_infodir="`echo ${ats_stripped} | sed -e 's#^/*##'`" +else +# it wasn't so return the original + rel_infodir="$exp_infodir" +fi + + + TS_VAR_SUBST="$TS_VAR_SUBST exp_infodir" + + + + TS_VAR_SUBST="$TS_VAR_SUBST rel_infodir" + + + + TS_VAR_SUBST="$TS_VAR_SUBST infodir" + + + + + +ats_last= +ats_cur="$mandir" +while test "x${ats_cur}" != "x${ats_last}"; +do + ats_last="${ats_cur}" + ats_cur=`eval "echo ${ats_cur}"` +done +exp_mandir="${ats_cur}" + + +ats_stripped=`echo $exp_mandir | sed -e "s#^${prefix}##"` +# check if the stripping was successful +if test "x$exp_mandir" != "x${ats_stripped}"; then +# it was, so strip of any leading slashes + rel_mandir="`echo ${ats_stripped} | sed -e 's#^/*##'`" +else +# it wasn't so return the original + rel_mandir="$exp_mandir" +fi + + + TS_VAR_SUBST="$TS_VAR_SUBST exp_mandir" + + + + TS_VAR_SUBST="$TS_VAR_SUBST rel_mandir" + + + + TS_VAR_SUBST="$TS_VAR_SUBST mandir" + + + + + 
+ats_last= +ats_cur="$sysconfdir" +while test "x${ats_cur}" != "x${ats_last}"; +do + ats_last="${ats_cur}" + ats_cur=`eval "echo ${ats_cur}"` +done +exp_sysconfdir="${ats_cur}" + + +ats_stripped=`echo $exp_sysconfdir | sed -e "s#^${prefix}##"` +# check if the stripping was successful +if test "x$exp_sysconfdir" != "x${ats_stripped}"; then +# it was, so strip of any leading slashes + rel_sysconfdir="`echo ${ats_stripped} | sed -e 's#^/*##'`" +else +# it wasn't so return the original + rel_sysconfdir="$exp_sysconfdir" +fi + + + TS_VAR_SUBST="$TS_VAR_SUBST exp_sysconfdir" + + + + TS_VAR_SUBST="$TS_VAR_SUBST rel_sysconfdir" + + + + TS_VAR_SUBST="$TS_VAR_SUBST sysconfdir" + + + + + +ats_last= +ats_cur="$datadir" +while test "x${ats_cur}" != "x${ats_last}"; +do + ats_last="${ats_cur}" + ats_cur=`eval "echo ${ats_cur}"` +done +exp_datadir="${ats_cur}" + + +ats_stripped=`echo $exp_datadir | sed -e "s#^${prefix}##"` +# check if the stripping was successful +if test "x$exp_datadir" != "x${ats_stripped}"; then +# it was, so strip of any leading slashes + rel_datadir="`echo ${ats_stripped} | sed -e 's#^/*##'`" +else +# it wasn't so return the original + rel_datadir="$exp_datadir" +fi + + + TS_VAR_SUBST="$TS_VAR_SUBST exp_datadir" + + + + TS_VAR_SUBST="$TS_VAR_SUBST rel_datadir" + + + + TS_VAR_SUBST="$TS_VAR_SUBST datadir" + + + + + +ats_last= +ats_cur="$installbuilddir" +while test "x${ats_cur}" != "x${ats_last}"; +do + ats_last="${ats_cur}" + ats_cur=`eval "echo ${ats_cur}"` +done +exp_installbuilddir="${ats_cur}" + + +ats_stripped=`echo $exp_installbuilddir | sed -e "s#^${prefix}##"` +# check if the stripping was successful +if test "x$exp_installbuilddir" != "x${ats_stripped}"; then +# it was, so strip of any leading slashes + rel_installbuilddir="`echo ${ats_stripped} | sed -e 's#^/*##'`" +else +# it wasn't so return the original + rel_installbuilddir="$exp_installbuilddir" +fi + + + TS_VAR_SUBST="$TS_VAR_SUBST exp_installbuilddir" + + + + TS_VAR_SUBST="$TS_VAR_SUBST 
rel_installbuilddir" + + + + TS_VAR_SUBST="$TS_VAR_SUBST installbuilddir" + + + + + +ats_last= +ats_cur="$includedir" +while test "x${ats_cur}" != "x${ats_last}"; +do + ats_last="${ats_cur}" + ats_cur=`eval "echo ${ats_cur}"` +done +exp_includedir="${ats_cur}" + + +ats_stripped=`echo $exp_includedir | sed -e "s#^${prefix}##"` +# check if the stripping was successful +if test "x$exp_includedir" != "x${ats_stripped}"; then +# it was, so strip of any leading slashes + rel_includedir="`echo ${ats_stripped} | sed -e 's#^/*##'`" +else +# it wasn't so return the original + rel_includedir="$exp_includedir" +fi + + + TS_VAR_SUBST="$TS_VAR_SUBST exp_includedir" + + + + TS_VAR_SUBST="$TS_VAR_SUBST rel_includedir" + + + + TS_VAR_SUBST="$TS_VAR_SUBST includedir" + + + + + +ats_last= +ats_cur="$localstatedir" +while test "x${ats_cur}" != "x${ats_last}"; +do + ats_last="${ats_cur}" + ats_cur=`eval "echo ${ats_cur}"` +done +exp_localstatedir="${ats_cur}" + + +ats_stripped=`echo $exp_localstatedir | sed -e "s#^${prefix}##"` +# check if the stripping was successful +if test "x$exp_localstatedir" != "x${ats_stripped}"; then +# it was, so strip of any leading slashes + rel_localstatedir="`echo ${ats_stripped} | sed -e 's#^/*##'`" +else +# it wasn't so return the original + rel_localstatedir="$exp_localstatedir" +fi + + + TS_VAR_SUBST="$TS_VAR_SUBST exp_localstatedir" + + + + TS_VAR_SUBST="$TS_VAR_SUBST rel_localstatedir" + + + + TS_VAR_SUBST="$TS_VAR_SUBST localstatedir" + + + + + +ats_last= +ats_cur="$runtimedir" +while test "x${ats_cur}" != "x${ats_last}"; +do + ats_last="${ats_cur}" + ats_cur=`eval "echo ${ats_cur}"` +done +exp_runtimedir="${ats_cur}" + + +ats_stripped=`echo $exp_runtimedir | sed -e "s#^${prefix}##"` +# check if the stripping was successful +if test "x$exp_runtimedir" != "x${ats_stripped}"; then +# it was, so strip of any leading slashes + rel_runtimedir="`echo ${ats_stripped} | sed -e 's#^/*##'`" +else +# it wasn't so return the original + 
rel_runtimedir="$exp_runtimedir" +fi + + + TS_VAR_SUBST="$TS_VAR_SUBST exp_runtimedir" + + + + TS_VAR_SUBST="$TS_VAR_SUBST rel_runtimedir" + + + + TS_VAR_SUBST="$TS_VAR_SUBST runtimedir" + + + + + +ats_last= +ats_cur="$logdir" +while test "x${ats_cur}" != "x${ats_last}"; +do + ats_last="${ats_cur}" + ats_cur=`eval "echo ${ats_cur}"` +done +exp_logdir="${ats_cur}" + + +ats_stripped=`echo $exp_logdir | sed -e "s#^${prefix}##"` +# check if the stripping was successful +if test "x$exp_logdir" != "x${ats_stripped}"; then +# it was, so strip of any leading slashes + rel_logdir="`echo ${ats_stripped} | sed -e 's#^/*##'`" +else +# it wasn't so return the original + rel_logdir="$exp_logdir" +fi + + + TS_VAR_SUBST="$TS_VAR_SUBST exp_logdir" + + + + TS_VAR_SUBST="$TS_VAR_SUBST rel_logdir" + + + + TS_VAR_SUBST="$TS_VAR_SUBST logdir" + + + + + +ats_last= +ats_cur="$cachedir" +while test "x${ats_cur}" != "x${ats_last}"; +do + ats_last="${ats_cur}" + ats_cur=`eval "echo ${ats_cur}"` +done +exp_cachedir="${ats_cur}" + + +ats_stripped=`echo $exp_cachedir | sed -e "s#^${prefix}##"` +# check if the stripping was successful +if test "x$exp_cachedir" != "x${ats_stripped}"; then +# it was, so strip of any leading slashes + rel_cachedir="`echo ${ats_stripped} | sed -e 's#^/*##'`" +else +# it wasn't so return the original + rel_cachedir="$exp_cachedir" +fi + + + TS_VAR_SUBST="$TS_VAR_SUBST exp_cachedir" + + + + TS_VAR_SUBST="$TS_VAR_SUBST rel_cachedir" + + + + TS_VAR_SUBST="$TS_VAR_SUBST cachedir" + + + + + +ats_last= +ats_cur="$docdir" +while test "x${ats_cur}" != "x${ats_last}"; +do + ats_last="${ats_cur}" + ats_cur=`eval "echo ${ats_cur}"` +done +exp_docdir="${ats_cur}" + + +ats_stripped=`echo $exp_docdir | sed -e "s#^${prefix}##"` +# check if the stripping was successful +if test "x$exp_docdir" != "x${ats_stripped}"; then +# it was, so strip of any leading slashes + rel_docdir="`echo ${ats_stripped} | sed -e 's#^/*##'`" +else +# it wasn't so return the original + 
rel_docdir="$exp_docdir" +fi + + + TS_VAR_SUBST="$TS_VAR_SUBST exp_docdir" + + + + TS_VAR_SUBST="$TS_VAR_SUBST rel_docdir" + + + + TS_VAR_SUBST="$TS_VAR_SUBST docdir" + + + + + + TS_VAR_SUBST="$TS_VAR_SUBST pkgbindir" + + + + TS_VAR_SUBST="$TS_VAR_SUBST pkgsbindir" + + + + TS_VAR_SUBST="$TS_VAR_SUBST pkglibdir" + + + + TS_VAR_SUBST="$TS_VAR_SUBST pkglibexecdir" + + + + TS_VAR_SUBST="$TS_VAR_SUBST pkgsysconfdir" + + + + TS_VAR_SUBST="$TS_VAR_SUBST pkgdatadir" + + + + TS_VAR_SUBST="$TS_VAR_SUBST pkglocalstatedir" + + + + TS_VAR_SUBST="$TS_VAR_SUBST pkgruntimedir" + + + + TS_VAR_SUBST="$TS_VAR_SUBST pkglogdir" + + + + TS_VAR_SUBST="$TS_VAR_SUBST pkgcachedir" + + + + TS_VAR_SUBST="$TS_VAR_SUBST pkgdocdir" + + + + +# ----------------------------------------------------------------------------- +# 3. CHECK FOR PROGRAMS + +# Compiler selection: +# +# Implementation note (toc) +# 1) Get default compiler settings (case statement.) +# 2) Check for over-rides of default compiler. +# 3) Set standard CFLAGS, SHARED_CFLAGS, etc. +# 4) (in first kludge mode block...) obtain any further CFLAG-type additions. +# 5) Test compilers with all flags set. + +# AC_PROG can sometimes mangle CFLAGS etc. +# in particular, on Linux they insert -g -O2, here we preserve any user CFLAGS +_ts_saved_CFLAGS="${CFLAGS}" +_ts_saved_CXXFLAGS="${CXXFLAGS}" + +# We force the compiler search list because the default GCC on Darwin cannot build +# Traffic Server. On most (all?) platforms, cc and c++ should be the preferred default +# compiler. +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu +if test -n "$ac_tool_prefix"; then + for ac_prog in cc gcc clang icc + do + # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. 
+set dummy $ac_tool_prefix$ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_CC="$ac_tool_prefix$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +$as_echo "$CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$CC" && break + done +fi +if test -z "$CC"; then + ac_ct_CC=$CC + for ac_prog in cc gcc clang icc +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_CC"; then + ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_CC="$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_CC=$ac_cv_prog_ac_ct_CC +if test -n "$ac_ct_CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 +$as_echo "$ac_ct_CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$ac_ct_CC" && break +done + + if test "x$ac_ct_CC" = x; then + CC="" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + CC=$ac_ct_CC + fi +fi + + +test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "no acceptable C compiler found in \$PATH +See \`config.log' for more details" "$LINENO" 5; } + +# Provide some information about the compiler. +$as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5 +set X $ac_compile +ac_compiler=$2 +for ac_option in --version -v -V -qversion; do + { { ac_try="$ac_compiler $ac_option >&5" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compiler $ac_option >&5") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + sed '10a\ +... rest of stderr output deleted ... + 10q' conftest.err >conftest.er1 + cat conftest.er1 >&5 + fi + rm -f conftest.er1 conftest.err + $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 + test $ac_status = 0; } +done + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +ac_clean_files_save=$ac_clean_files +ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out" +# Try to create an executable without -o first, disregard a.out. +# It will help us diagnose broken compilers, and finding out an intuition +# of exeext. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler works" >&5 +$as_echo_n "checking whether the C compiler works... " >&6; } +ac_link_default=`$as_echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'` + +# The possible output files: +ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*" + +ac_rmfiles= +for ac_file in $ac_files +do + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; + * ) ac_rmfiles="$ac_rmfiles $ac_file";; + esac +done +rm -f $ac_rmfiles + +if { { ac_try="$ac_link_default" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link_default") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then : + # Autoconf-2.13 could set the ac_cv_exeext variable to `no'. +# So ignore a value of `no', otherwise this would lead to `EXEEXT = no' +# in a Makefile. We should not override ac_cv_exeext if it was cached, +# so that the user can short-circuit this test for compilers unknown to +# Autoconf. +for ac_file in $ac_files '' +do + test -f "$ac_file" || continue + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) + ;; + [ab].out ) + # We found the default executable, but exeext='' is most + # certainly right. 
+ break;; + *.* ) + if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no; + then :; else + ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` + fi + # We set ac_cv_exeext here because the later test for it is not + # safe: cross compilers may not add the suffix if given an `-o' + # argument, so we may need to know it at that point already. + # Even if this section looks crufty: it has the advantage of + # actually working. + break;; + * ) + break;; + esac +done +test "$ac_cv_exeext" = no && ac_cv_exeext= + +else + ac_file='' +fi +if test -z "$ac_file"; then : + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +$as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error 77 "C compiler cannot create executables +See \`config.log' for more details" "$LINENO" 5; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler default output file name" >&5 +$as_echo_n "checking for C compiler default output file name... " >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5 +$as_echo "$ac_file" >&6; } +ac_exeext=$ac_cv_exeext + +rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out +ac_clean_files=$ac_clean_files_save +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5 +$as_echo_n "checking for suffix of executables... " >&6; } +if { { ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 + test $ac_status = 0; }; then : + # If both `conftest.exe' and `conftest' are `present' (well, observable) +# catch `conftest.exe'. For instance with Cygwin, `ls conftest' will +# work properly (i.e., refer to `conftest.exe'), while it won't with +# `rm'. +for ac_file in conftest.exe conftest conftest.*; do + test -f "$ac_file" || continue + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; + *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` + break;; + * ) break;; + esac +done +else + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "cannot compute suffix of executables: cannot compile and link +See \`config.log' for more details" "$LINENO" 5; } +fi +rm -f conftest conftest$ac_cv_exeext +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5 +$as_echo "$ac_cv_exeext" >&6; } + +rm -f conftest.$ac_ext +EXEEXT=$ac_cv_exeext +ac_exeext=$EXEEXT +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +int +main () +{ +FILE *f = fopen ("conftest.out", "w"); + return ferror (f) || fclose (f) != 0; + + ; + return 0; +} +_ACEOF +ac_clean_files="$ac_clean_files conftest.out" +# Check that the compiler produces executables we can run. If not, either +# the compiler is broken, or we cross compile. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5 +$as_echo_n "checking whether we are cross compiling... " >&6; } +if test "$cross_compiling" != yes; then + { { ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 + test $ac_status = 0; } + if { ac_try='./conftest$ac_cv_exeext' + { { case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_try") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; }; then + cross_compiling=no + else + if test "$cross_compiling" = maybe; then + cross_compiling=yes + else + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "cannot run C compiled programs. +If you meant to cross compile, use \`--host'. +See \`config.log' for more details" "$LINENO" 5; } + fi + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5 +$as_echo "$cross_compiling" >&6; } + +rm -f conftest.$ac_ext conftest$ac_cv_exeext conftest.out +ac_clean_files=$ac_clean_files_save +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5 +$as_echo_n "checking for suffix of object files... " >&6; } +if ${ac_cv_objext+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.o conftest.obj +if { { ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compile") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 + test $ac_status = 0; }; then : + for ac_file in conftest.o conftest.obj conftest.*; do + test -f "$ac_file" || continue; + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM ) ;; + *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'` + break;; + esac +done +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "cannot compute suffix of object files: cannot compile +See \`config.log' for more details" "$LINENO" 5; } +fi +rm -f conftest.$ac_cv_objext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5 +$as_echo "$ac_cv_objext" >&6; } +OBJEXT=$ac_cv_objext +ac_objext=$OBJEXT +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5 +$as_echo_n "checking whether we are using the GNU C compiler... " >&6; } +if ${ac_cv_c_compiler_gnu+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ +#ifndef __GNUC__ + choke me +#endif + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_compiler_gnu=yes +else + ac_compiler_gnu=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +ac_cv_c_compiler_gnu=$ac_compiler_gnu + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5 +$as_echo "$ac_cv_c_compiler_gnu" >&6; } +if test $ac_compiler_gnu = yes; then + GCC=yes +else + GCC= +fi +ac_test_CFLAGS=${CFLAGS+set} +ac_save_CFLAGS=$CFLAGS +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5 +$as_echo_n "checking whether $CC accepts -g... 
" >&6; } +if ${ac_cv_prog_cc_g+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_save_c_werror_flag=$ac_c_werror_flag + ac_c_werror_flag=yes + ac_cv_prog_cc_g=no + CFLAGS="-g" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_prog_cc_g=yes +else + CFLAGS="" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + +else + ac_c_werror_flag=$ac_save_c_werror_flag + CFLAGS="-g" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_prog_cc_g=yes +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + ac_c_werror_flag=$ac_save_c_werror_flag +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5 +$as_echo "$ac_cv_prog_cc_g" >&6; } +if test "$ac_test_CFLAGS" = set; then + CFLAGS=$ac_save_CFLAGS +elif test $ac_cv_prog_cc_g = yes; then + if test "$GCC" = yes; then + CFLAGS="-g -O2" + else + CFLAGS="-g" + fi +else + if test "$GCC" = yes; then + CFLAGS="-O2" + else + CFLAGS= + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5 +$as_echo_n "checking for $CC option to accept ISO C89... " >&6; } +if ${ac_cv_prog_cc_c89+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_cv_prog_cc_c89=no +ac_save_CC=$CC +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +#include +struct stat; +/* Most of the following tests are stolen from RCS 5.7's src/conf.sh. 
*/ +struct buf { int x; }; +FILE * (*rcsopen) (struct buf *, struct stat *, int); +static char *e (p, i) + char **p; + int i; +{ + return p[i]; +} +static char *f (char * (*g) (char **, int), char **p, ...) +{ + char *s; + va_list v; + va_start (v,p); + s = g (p, va_arg (v,int)); + va_end (v); + return s; +} + +/* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has + function prototypes and stuff, but not '\xHH' hex character constants. + These don't provoke an error unfortunately, instead are silently treated + as 'x'. The following induces an error, until -std is added to get + proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an + array size at least. It's necessary to write '\x00'==0 to get something + that's true only with -std. */ +int osf4_cc_array ['\x00' == 0 ? 1 : -1]; + +/* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters + inside strings and character constants. */ +#define FOO(x) 'x' +int xlc6_cc_array[FOO(a) == 'x' ? 
1 : -1]; + +int test (int i, double x); +struct s1 {int (*f) (int a);}; +struct s2 {int (*f) (double a);}; +int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int); +int argc; +char **argv; +int +main () +{ +return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]; + ; + return 0; +} +_ACEOF +for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \ + -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" +do + CC="$ac_save_CC $ac_arg" + if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_prog_cc_c89=$ac_arg +fi +rm -f core conftest.err conftest.$ac_objext + test "x$ac_cv_prog_cc_c89" != "xno" && break +done +rm -f conftest.$ac_ext +CC=$ac_save_CC + +fi +# AC_CACHE_VAL +case "x$ac_cv_prog_cc_c89" in + x) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 +$as_echo "none needed" >&6; } ;; + xno) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 +$as_echo "unsupported" >&6; } ;; + *) + CC="$CC $ac_cv_prog_cc_c89" + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5 +$as_echo "$ac_cv_prog_cc_c89" >&6; } ;; +esac +if test "x$ac_cv_prog_cc_c89" != xno; then : + +fi + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu +DEPDIR="${am__leading_dot}deps" + +ac_config_commands="$ac_config_commands depfiles" + + +am_make=${MAKE-make} +cat > confinc << 'END' +am__doit: + @echo this is the am__doit target +.PHONY: am__doit +END +# If we don't find an include directive, just comment out the code. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for style of include used by $am_make" >&5 +$as_echo_n "checking for style of include used by $am_make... " >&6; } +am__include="#" +am__quote= +_am_result=none +# First try GNU make style include. +echo "include confinc" > confmf +# Ignore all kinds of additional output from 'make'. 
+case `$am_make -s -f confmf 2> /dev/null` in #( +*the\ am__doit\ target*) + am__include=include + am__quote= + _am_result=GNU + ;; +esac +# Now try BSD make style include. +if test "$am__include" = "#"; then + echo '.include "confinc"' > confmf + case `$am_make -s -f confmf 2> /dev/null` in #( + *the\ am__doit\ target*) + am__include=.include + am__quote="\"" + _am_result=BSD + ;; + esac +fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $_am_result" >&5 +$as_echo "$_am_result" >&6; } +rm -f confinc confmf + +# Check whether --enable-dependency-tracking was given. +if test "${enable_dependency_tracking+set}" = set; then : + enableval=$enable_dependency_tracking; +fi + +if test "x$enable_dependency_tracking" != xno; then + am_depcomp="$ac_aux_dir/depcomp" + AMDEPBACKSLASH='\' + am__nodep='_no' +fi + if test "x$enable_dependency_tracking" != xno; then + AMDEP_TRUE= + AMDEP_FALSE='#' +else + AMDEP_TRUE='#' + AMDEP_FALSE= +fi + + + +depcc="$CC" am_compiler_list= + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5 +$as_echo_n "checking dependency style of $depcc... " >&6; } +if ${am_cv_CC_dependencies_compiler_type+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then + # We make a subdir and do the tests there. Otherwise we can end up + # making bogus files that we don't know about and never remove. For + # instance it was reported that on HP-UX the gcc test will end up + # making a dummy file named 'D' -- because '-MD' means "put the output + # in D". + rm -rf conftest.dir + mkdir conftest.dir + # Copy depcomp to subdir because otherwise we won't find it if we're + # using a relative directory. + cp "$am_depcomp" conftest.dir + cd conftest.dir + # We will build objects and dependencies in a subdirectory because + # it helps to detect inapplicable dependency modes. 
For instance + # both Tru64's cc and ICC support -MD to output dependencies as a + # side effect of compilation, but ICC will put the dependencies in + # the current directory while Tru64 will put them in the object + # directory. + mkdir sub + + am_cv_CC_dependencies_compiler_type=none + if test "$am_compiler_list" = ""; then + am_compiler_list=`sed -n 's/^#*\([a-zA-Z0-9]*\))$/\1/p' < ./depcomp` + fi + am__universal=false + case " $depcc " in #( + *\ -arch\ *\ -arch\ *) am__universal=true ;; + esac + + for depmode in $am_compiler_list; do + # Setup a source with many dependencies, because some compilers + # like to wrap large dependency lists on column 80 (with \), and + # we should not choose a depcomp mode which is confused by this. + # + # We need to recreate these files for each test, as the compiler may + # overwrite some of them when testing with obscure command lines. + # This happens at least with the AIX C compiler. + : > sub/conftest.c + for i in 1 2 3 4 5 6; do + echo '#include "conftst'$i'.h"' >> sub/conftest.c + # Using ": > sub/conftst$i.h" creates only sub/conftst1.h with + # Solaris 10 /bin/sh. + echo '/* dummy */' > sub/conftst$i.h + done + echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf + + # We check with '-c' and '-o' for the sake of the "dashmstdout" + # mode. It turns out that the SunPro C++ compiler does not properly + # handle '-M -o', and we need to detect this. Also, some Intel + # versions had trouble with output in subdirs. + am__obj=sub/conftest.${OBJEXT-o} + am__minus_obj="-o $am__obj" + case $depmode in + gcc) + # This depmode causes a compiler race in universal mode. + test "$am__universal" = false || continue + ;; + nosideeffect) + # After this tag, mechanisms are not by side-effect, so they'll + # only be used when explicitly requested. 
+ if test "x$enable_dependency_tracking" = xyes; then + continue + else + break + fi + ;; + msvc7 | msvc7msys | msvisualcpp | msvcmsys) + # This compiler won't grok '-c -o', but also, the minuso test has + # not run yet. These depmodes are late enough in the game, and + # so weak that their functioning should not be impacted. + am__obj=conftest.${OBJEXT-o} + am__minus_obj= + ;; + none) break ;; + esac + if depmode=$depmode \ + source=sub/conftest.c object=$am__obj \ + depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \ + $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \ + >/dev/null 2>conftest.err && + grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 && + grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 && + grep $am__obj sub/conftest.Po > /dev/null 2>&1 && + ${MAKE-make} -s -f confmf > /dev/null 2>&1; then + # icc doesn't choke on unknown options, it will just issue warnings + # or remarks (even with -Werror). So we grep stderr for any message + # that says an option was ignored or not supported. + # When given -MP, icc 7.0 and 7.1 complain thusly: + # icc: Command line warning: ignoring option '-M'; no argument required + # The diagnosis changed in icc 8.0: + # icc: Command line remark: option '-MP' not supported + if (grep 'ignoring option' conftest.err || + grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else + am_cv_CC_dependencies_compiler_type=$depmode + break + fi + fi + done + + cd .. 
+ rm -rf conftest.dir +else + am_cv_CC_dependencies_compiler_type=none +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_CC_dependencies_compiler_type" >&5 +$as_echo "$am_cv_CC_dependencies_compiler_type" >&6; } +CCDEPMODE=depmode=$am_cv_CC_dependencies_compiler_type + + if + test "x$enable_dependency_tracking" != xno \ + && test "$am_cv_CC_dependencies_compiler_type" = gcc3; then + am__fastdepCC_TRUE= + am__fastdepCC_FALSE='#' +else + am__fastdepCC_TRUE='#' + am__fastdepCC_FALSE= +fi + + +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu +if test -z "$CXX"; then + if test -n "$CCC"; then + CXX=$CCC + else + if test -n "$ac_tool_prefix"; then + for ac_prog in c++ g++ clang++ icpc + do + # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. +set dummy $ac_tool_prefix$ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_CXX+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$CXX"; then + ac_cv_prog_CXX="$CXX" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_CXX="$ac_tool_prefix$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +CXX=$ac_cv_prog_CXX +if test -n "$CXX"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CXX" >&5 +$as_echo "$CXX" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$CXX" && break + done +fi +if test -z "$CXX"; then + ac_ct_CXX=$CXX + for ac_prog in c++ g++ clang++ icpc +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_CXX+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_CXX"; then + ac_cv_prog_ac_ct_CXX="$ac_ct_CXX" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_CXX="$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_CXX=$ac_cv_prog_ac_ct_CXX +if test -n "$ac_ct_CXX"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CXX" >&5 +$as_echo "$ac_ct_CXX" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$ac_ct_CXX" && break +done + + if test "x$ac_ct_CXX" = x; then + CXX="g++" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + CXX=$ac_ct_CXX + fi +fi + + fi +fi +# Provide some information about the compiler. +$as_echo "$as_me:${as_lineno-$LINENO}: checking for C++ compiler version" >&5 +set X $ac_compile +ac_compiler=$2 +for ac_option in --version -v -V -qversion; do + { { ac_try="$ac_compiler $ac_option >&5" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compiler $ac_option >&5") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + sed '10a\ +... rest of stderr output deleted ... + 10q' conftest.err >conftest.er1 + cat conftest.er1 >&5 + fi + rm -f conftest.er1 conftest.err + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } +done + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C++ compiler" >&5 +$as_echo_n "checking whether we are using the GNU C++ compiler... 
" >&6; } +if ${ac_cv_cxx_compiler_gnu+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ +#ifndef __GNUC__ + choke me +#endif + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + ac_compiler_gnu=yes +else + ac_compiler_gnu=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +ac_cv_cxx_compiler_gnu=$ac_compiler_gnu + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_cxx_compiler_gnu" >&5 +$as_echo "$ac_cv_cxx_compiler_gnu" >&6; } +if test $ac_compiler_gnu = yes; then + GXX=yes +else + GXX= +fi +ac_test_CXXFLAGS=${CXXFLAGS+set} +ac_save_CXXFLAGS=$CXXFLAGS +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CXX accepts -g" >&5 +$as_echo_n "checking whether $CXX accepts -g... " >&6; } +if ${ac_cv_prog_cxx_g+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_save_cxx_werror_flag=$ac_cxx_werror_flag + ac_cxx_werror_flag=yes + ac_cv_prog_cxx_g=no + CXXFLAGS="-g" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + ac_cv_prog_cxx_g=yes +else + CXXFLAGS="" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + +else + ac_cxx_werror_flag=$ac_save_cxx_werror_flag + CXXFLAGS="-g" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + ac_cv_prog_cxx_g=yes +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + ac_cxx_werror_flag=$ac_save_cxx_werror_flag +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cxx_g" >&5 +$as_echo "$ac_cv_prog_cxx_g" >&6; } +if test "$ac_test_CXXFLAGS" = set; then + CXXFLAGS=$ac_save_CXXFLAGS +elif test $ac_cv_prog_cxx_g = yes; then + if test "$GXX" = yes; then + CXXFLAGS="-g -O2" + else + CXXFLAGS="-g" + fi +else + if test "$GXX" = yes; then + CXXFLAGS="-O2" + else + CXXFLAGS= + fi +fi +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +depcc="$CXX" am_compiler_list= + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5 +$as_echo_n "checking dependency style of $depcc... " >&6; } +if ${am_cv_CXX_dependencies_compiler_type+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then + # We make a subdir and do the tests there. Otherwise we can end up + # making bogus files that we don't know about and never remove. For + # instance it was reported that on HP-UX the gcc test will end up + # making a dummy file named 'D' -- because '-MD' means "put the output + # in D". + rm -rf conftest.dir + mkdir conftest.dir + # Copy depcomp to subdir because otherwise we won't find it if we're + # using a relative directory. + cp "$am_depcomp" conftest.dir + cd conftest.dir + # We will build objects and dependencies in a subdirectory because + # it helps to detect inapplicable dependency modes. 
For instance + # both Tru64's cc and ICC support -MD to output dependencies as a + # side effect of compilation, but ICC will put the dependencies in + # the current directory while Tru64 will put them in the object + # directory. + mkdir sub + + am_cv_CXX_dependencies_compiler_type=none + if test "$am_compiler_list" = ""; then + am_compiler_list=`sed -n 's/^#*\([a-zA-Z0-9]*\))$/\1/p' < ./depcomp` + fi + am__universal=false + case " $depcc " in #( + *\ -arch\ *\ -arch\ *) am__universal=true ;; + esac + + for depmode in $am_compiler_list; do + # Setup a source with many dependencies, because some compilers + # like to wrap large dependency lists on column 80 (with \), and + # we should not choose a depcomp mode which is confused by this. + # + # We need to recreate these files for each test, as the compiler may + # overwrite some of them when testing with obscure command lines. + # This happens at least with the AIX C compiler. + : > sub/conftest.c + for i in 1 2 3 4 5 6; do + echo '#include "conftst'$i'.h"' >> sub/conftest.c + # Using ": > sub/conftst$i.h" creates only sub/conftst1.h with + # Solaris 10 /bin/sh. + echo '/* dummy */' > sub/conftst$i.h + done + echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf + + # We check with '-c' and '-o' for the sake of the "dashmstdout" + # mode. It turns out that the SunPro C++ compiler does not properly + # handle '-M -o', and we need to detect this. Also, some Intel + # versions had trouble with output in subdirs. + am__obj=sub/conftest.${OBJEXT-o} + am__minus_obj="-o $am__obj" + case $depmode in + gcc) + # This depmode causes a compiler race in universal mode. + test "$am__universal" = false || continue + ;; + nosideeffect) + # After this tag, mechanisms are not by side-effect, so they'll + # only be used when explicitly requested. 
+ if test "x$enable_dependency_tracking" = xyes; then + continue + else + break + fi + ;; + msvc7 | msvc7msys | msvisualcpp | msvcmsys) + # This compiler won't grok '-c -o', but also, the minuso test has + # not run yet. These depmodes are late enough in the game, and + # so weak that their functioning should not be impacted. + am__obj=conftest.${OBJEXT-o} + am__minus_obj= + ;; + none) break ;; + esac + if depmode=$depmode \ + source=sub/conftest.c object=$am__obj \ + depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \ + $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \ + >/dev/null 2>conftest.err && + grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 && + grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 && + grep $am__obj sub/conftest.Po > /dev/null 2>&1 && + ${MAKE-make} -s -f confmf > /dev/null 2>&1; then + # icc doesn't choke on unknown options, it will just issue warnings + # or remarks (even with -Werror). So we grep stderr for any message + # that says an option was ignored or not supported. + # When given -MP, icc 7.0 and 7.1 complain thusly: + # icc: Command line warning: ignoring option '-M'; no argument required + # The diagnosis changed in icc 8.0: + # icc: Command line remark: option '-MP' not supported + if (grep 'ignoring option' conftest.err || + grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else + am_cv_CXX_dependencies_compiler_type=$depmode + break + fi + fi + done + + cd .. 
+ rm -rf conftest.dir +else + am_cv_CXX_dependencies_compiler_type=none +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_CXX_dependencies_compiler_type" >&5 +$as_echo "$am_cv_CXX_dependencies_compiler_type" >&6; } +CXXDEPMODE=depmode=$am_cv_CXX_dependencies_compiler_type + + if + test "x$enable_dependency_tracking" != xno \ + && test "$am_cv_CXX_dependencies_compiler_type" = gcc3; then + am__fastdepCXX_TRUE= + am__fastdepCXX_FALSE='#' +else + am__fastdepCXX_TRUE='#' + am__fastdepCXX_FALSE= +fi + + + +if test "x$CC" != xcc; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC and cc understand -c and -o together" >&5 +$as_echo_n "checking whether $CC and cc understand -c and -o together... " >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether cc understands -c and -o together" >&5 +$as_echo_n "checking whether cc understands -c and -o together... " >&6; } +fi +set dummy $CC; ac_cc=`$as_echo "$2" | + sed 's/[^a-zA-Z0-9_]/_/g;s/^[0-9]/_/'` +if eval \${ac_cv_prog_cc_${ac_cc}_c_o+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +# Make sure it works both with $CC and with simple cc. +# We do the test twice because some compilers refuse to overwrite an +# existing .o file with -o, though they will create one. +ac_try='$CC -c conftest.$ac_ext -o conftest2.$ac_objext >&5' +rm -f conftest2.* +if { { case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_try") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 + test $ac_status = 0; } && + test -f conftest2.$ac_objext && { { case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_try") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; +then + eval ac_cv_prog_cc_${ac_cc}_c_o=yes + if test "x$CC" != xcc; then + # Test first that cc exists at all. + if { ac_try='cc -c conftest.$ac_ext >&5' + { { case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_try") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; }; then + ac_try='cc -c conftest.$ac_ext -o conftest2.$ac_objext >&5' + rm -f conftest2.* + if { { case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_try") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && + test -f conftest2.$ac_objext && { { case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_try") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; + then + # cc works too. + : + else + # cc exists but doesn't like -o. 
+ eval ac_cv_prog_cc_${ac_cc}_c_o=no + fi + fi + fi +else + eval ac_cv_prog_cc_${ac_cc}_c_o=no +fi +rm -f core conftest* + +fi +if eval test \$ac_cv_prog_cc_${ac_cc}_c_o = yes; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + +$as_echo "#define NO_MINUS_C_MINUS_O 1" >>confdefs.h + +fi + +# FIXME: we rely on the cache variable name because +# there is no other way. +set dummy $CC +am_cc=`echo $2 | sed 's/[^a-zA-Z0-9_]/_/g;s/^[0-9]/_/'` +eval am_t=\$ac_cv_prog_cc_${am_cc}_c_o +if test "$am_t" != yes; then + # Losing compiler, so override with the script. + # FIXME: It is wrong to rewrite CC. + # But if we don't then we get into trouble of one sort or another. + # A longer-term fix would be to have automake use am__CC in this case, + # and then we could set am__CC="\$(top_srcdir)/compile \$(CC)" + CC="$am_aux_dir/compile $CC" +fi + + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C preprocessor" >&5 +$as_echo_n "checking how to run the C preprocessor... " >&6; } +# On Suns, sometimes $CPP names a directory. +if test -n "$CPP" && test -d "$CPP"; then + CPP= +fi +if test -z "$CPP"; then + if ${ac_cv_prog_CPP+:} false; then : + $as_echo_n "(cached) " >&6 +else + # Double quotes because CPP needs to be expanded + for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp" + do + ac_preproc_ok=false +for ac_c_preproc_warn_flag in '' yes +do + # Use a header file that comes with gcc, so configuring glibc + # with a fresh cross-compiler works. + # Prefer to if __STDC__ is defined, since + # exists even on freestanding compilers. 
+ # On the NeXT, cc -E runs the code through the compiler's parser, + # not just through cpp. "Syntax error" is here to catch this case. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#ifdef __STDC__ +# include +#else +# include +#endif + Syntax error +_ACEOF +if ac_fn_c_try_cpp "$LINENO"; then : + +else + # Broken: fails on valid input. +continue +fi +rm -f conftest.err conftest.i conftest.$ac_ext + + # OK, works on sane cases. Now check whether nonexistent headers + # can be detected and how. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +_ACEOF +if ac_fn_c_try_cpp "$LINENO"; then : + # Broken: success on invalid input. +continue +else + # Passes both tests. +ac_preproc_ok=: +break +fi +rm -f conftest.err conftest.i conftest.$ac_ext + +done +# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. +rm -f conftest.i conftest.err conftest.$ac_ext +if $ac_preproc_ok; then : + break +fi + + done + ac_cv_prog_CPP=$CPP + +fi + CPP=$ac_cv_prog_CPP +else + ac_cv_prog_CPP=$CPP +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $CPP" >&5 +$as_echo "$CPP" >&6; } +ac_preproc_ok=false +for ac_c_preproc_warn_flag in '' yes +do + # Use a header file that comes with gcc, so configuring glibc + # with a fresh cross-compiler works. + # Prefer to if __STDC__ is defined, since + # exists even on freestanding compilers. + # On the NeXT, cc -E runs the code through the compiler's parser, + # not just through cpp. "Syntax error" is here to catch this case. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#ifdef __STDC__ +# include +#else +# include +#endif + Syntax error +_ACEOF +if ac_fn_c_try_cpp "$LINENO"; then : + +else + # Broken: fails on valid input. +continue +fi +rm -f conftest.err conftest.i conftest.$ac_ext + + # OK, works on sane cases. Now check whether nonexistent headers + # can be detected and how. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +#include +_ACEOF +if ac_fn_c_try_cpp "$LINENO"; then : + # Broken: success on invalid input. +continue +else + # Passes both tests. +ac_preproc_ok=: +break +fi +rm -f conftest.err conftest.i conftest.$ac_ext + +done +# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. +rm -f conftest.i conftest.err conftest.$ac_ext +if $ac_preproc_ok; then : + +else + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "C preprocessor \"$CPP\" fails sanity check +See \`config.log' for more details" "$LINENO" 5; } +fi + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C++ preprocessor" >&5 +$as_echo_n "checking how to run the C++ preprocessor... " >&6; } +if test -z "$CXXCPP"; then + if ${ac_cv_prog_CXXCPP+:} false; then : + $as_echo_n "(cached) " >&6 +else + # Double quotes because CXXCPP needs to be expanded + for CXXCPP in "$CXX -E" "/lib/cpp" + do + ac_preproc_ok=false +for ac_cxx_preproc_warn_flag in '' yes +do + # Use a header file that comes with gcc, so configuring glibc + # with a fresh cross-compiler works. + # Prefer to if __STDC__ is defined, since + # exists even on freestanding compilers. + # On the NeXT, cc -E runs the code through the compiler's parser, + # not just through cpp. "Syntax error" is here to catch this case. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +#ifdef __STDC__ +# include +#else +# include +#endif + Syntax error +_ACEOF +if ac_fn_cxx_try_cpp "$LINENO"; then : + +else + # Broken: fails on valid input. +continue +fi +rm -f conftest.err conftest.i conftest.$ac_ext + + # OK, works on sane cases. Now check whether nonexistent headers + # can be detected and how. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +_ACEOF +if ac_fn_cxx_try_cpp "$LINENO"; then : + # Broken: success on invalid input. +continue +else + # Passes both tests. +ac_preproc_ok=: +break +fi +rm -f conftest.err conftest.i conftest.$ac_ext + +done +# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. +rm -f conftest.i conftest.err conftest.$ac_ext +if $ac_preproc_ok; then : + break +fi + + done + ac_cv_prog_CXXCPP=$CXXCPP + +fi + CXXCPP=$ac_cv_prog_CXXCPP +else + ac_cv_prog_CXXCPP=$CXXCPP +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $CXXCPP" >&5 +$as_echo "$CXXCPP" >&6; } +ac_preproc_ok=false +for ac_cxx_preproc_warn_flag in '' yes +do + # Use a header file that comes with gcc, so configuring glibc + # with a fresh cross-compiler works. + # Prefer to if __STDC__ is defined, since + # exists even on freestanding compilers. + # On the NeXT, cc -E runs the code through the compiler's parser, + # not just through cpp. "Syntax error" is here to catch this case. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#ifdef __STDC__ +# include +#else +# include +#endif + Syntax error +_ACEOF +if ac_fn_cxx_try_cpp "$LINENO"; then : + +else + # Broken: fails on valid input. +continue +fi +rm -f conftest.err conftest.i conftest.$ac_ext + + # OK, works on sane cases. Now check whether nonexistent headers + # can be detected and how. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +_ACEOF +if ac_fn_cxx_try_cpp "$LINENO"; then : + # Broken: success on invalid input. +continue +else + # Passes both tests. 
+ac_preproc_ok=: +break +fi +rm -f conftest.err conftest.i conftest.$ac_ext + +done +# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. +rm -f conftest.i conftest.err conftest.$ac_ext +if $ac_preproc_ok; then : + +else + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "C++ preprocessor \"$CXXCPP\" fails sanity check +See \`config.log' for more details" "$LINENO" 5; } +fi + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +# By default we simply use the C compiler to build assembly code. + +test "${CCAS+set}" = set || CCAS=$CC +test "${CCASFLAGS+set}" = set || CCASFLAGS=$CFLAGS + + + +depcc="$CCAS" am_compiler_list= + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5 +$as_echo_n "checking dependency style of $depcc... " >&6; } +if ${am_cv_CCAS_dependencies_compiler_type+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then + # We make a subdir and do the tests there. Otherwise we can end up + # making bogus files that we don't know about and never remove. For + # instance it was reported that on HP-UX the gcc test will end up + # making a dummy file named 'D' -- because '-MD' means "put the output + # in D". + rm -rf conftest.dir + mkdir conftest.dir + # Copy depcomp to subdir because otherwise we won't find it if we're + # using a relative directory. + cp "$am_depcomp" conftest.dir + cd conftest.dir + # We will build objects and dependencies in a subdirectory because + # it helps to detect inapplicable dependency modes. 
For instance + # both Tru64's cc and ICC support -MD to output dependencies as a + # side effect of compilation, but ICC will put the dependencies in + # the current directory while Tru64 will put them in the object + # directory. + mkdir sub + + am_cv_CCAS_dependencies_compiler_type=none + if test "$am_compiler_list" = ""; then + am_compiler_list=`sed -n 's/^#*\([a-zA-Z0-9]*\))$/\1/p' < ./depcomp` + fi + am__universal=false + + + for depmode in $am_compiler_list; do + # Setup a source with many dependencies, because some compilers + # like to wrap large dependency lists on column 80 (with \), and + # we should not choose a depcomp mode which is confused by this. + # + # We need to recreate these files for each test, as the compiler may + # overwrite some of them when testing with obscure command lines. + # This happens at least with the AIX C compiler. + : > sub/conftest.c + for i in 1 2 3 4 5 6; do + echo '#include "conftst'$i'.h"' >> sub/conftest.c + # Using ": > sub/conftst$i.h" creates only sub/conftst1.h with + # Solaris 10 /bin/sh. + echo '/* dummy */' > sub/conftst$i.h + done + echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf + + # We check with '-c' and '-o' for the sake of the "dashmstdout" + # mode. It turns out that the SunPro C++ compiler does not properly + # handle '-M -o', and we need to detect this. Also, some Intel + # versions had trouble with output in subdirs. + am__obj=sub/conftest.${OBJEXT-o} + am__minus_obj="-o $am__obj" + case $depmode in + gcc) + # This depmode causes a compiler race in universal mode. + test "$am__universal" = false || continue + ;; + nosideeffect) + # After this tag, mechanisms are not by side-effect, so they'll + # only be used when explicitly requested. + if test "x$enable_dependency_tracking" = xyes; then + continue + else + break + fi + ;; + msvc7 | msvc7msys | msvisualcpp | msvcmsys) + # This compiler won't grok '-c -o', but also, the minuso test has + # not run yet. 
These depmodes are late enough in the game, and + # so weak that their functioning should not be impacted. + am__obj=conftest.${OBJEXT-o} + am__minus_obj= + ;; + none) break ;; + esac + if depmode=$depmode \ + source=sub/conftest.c object=$am__obj \ + depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \ + $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \ + >/dev/null 2>conftest.err && + grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 && + grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 && + grep $am__obj sub/conftest.Po > /dev/null 2>&1 && + ${MAKE-make} -s -f confmf > /dev/null 2>&1; then + # icc doesn't choke on unknown options, it will just issue warnings + # or remarks (even with -Werror). So we grep stderr for any message + # that says an option was ignored or not supported. + # When given -MP, icc 7.0 and 7.1 complain thusly: + # icc: Command line warning: ignoring option '-M'; no argument required + # The diagnosis changed in icc 8.0: + # icc: Command line remark: option '-MP' not supported + if (grep 'ignoring option' conftest.err || + grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else + am_cv_CCAS_dependencies_compiler_type=$depmode + break + fi + fi + done + + cd .. + rm -rf conftest.dir +else + am_cv_CCAS_dependencies_compiler_type=none +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_CCAS_dependencies_compiler_type" >&5 +$as_echo "$am_cv_CCAS_dependencies_compiler_type" >&6; } +CCASDEPMODE=depmode=$am_cv_CCAS_dependencies_compiler_type + + if + test "x$enable_dependency_tracking" != xno \ + && test "$am_cv_CCAS_dependencies_compiler_type" = gcc3; then + am__fastdepCCAS_TRUE= + am__fastdepCCAS_FALSE='#' +else + am__fastdepCCAS_TRUE='#' + am__fastdepCCAS_FALSE= +fi + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler vendor" >&5 +$as_echo_n "checking for C compiler vendor... 
" >&6; } +if ${ax_cv_c_compiler_vendor+:} false; then : + $as_echo_n "(cached) " >&6 +else + # note: don't check for gcc first since some other compilers define __GNUC__ + vendors="intel: __ICC,__ECC,__INTEL_COMPILER + ibm: __xlc__,__xlC__,__IBMC__,__IBMCPP__ + pathscale: __PATHCC__,__PATHSCALE__ + clang: __clang__ + gnu: __GNUC__ + sun: __SUNPRO_C,__SUNPRO_CC + hp: __HP_cc,__HP_aCC + dec: __DECC,__DECCXX,__DECC_VER,__DECCXX_VER + borland: __BORLANDC__,__TURBOC__ + comeau: __COMO__ + cray: _CRAYC + kai: __KCC + lcc: __LCC__ + sgi: __sgi,sgi + microsoft: _MSC_VER + metrowerks: __MWERKS__ + watcom: __WATCOMC__ + portland: __PGI + unknown: UNKNOWN" + for ventest in $vendors; do + case $ventest in + *:) vendor=$ventest; continue ;; + *) vencpp="defined("`echo $ventest | sed 's/,/) || defined(/g'`")" ;; + esac + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + #if !($vencpp) + thisisanerror; + #endif + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + break +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + done + ax_cv_c_compiler_vendor=`echo $vendor | cut -d: -f1` + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_c_compiler_vendor" >&5 +$as_echo "$ax_cv_c_compiler_vendor" >&6; } + + +CFLAGS="${_ts_saved_CFLAGS}" +CXXFLAGS="${_ts_saved_CXXFLAGS}" + +# AX_CXX_COMPILE_STDCXX_11 requires the current language to be C++. +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + + + + ac_success=no + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CXX supports C++11 features by default" >&5 +$as_echo_n "checking whether $CXX supports C++11 features by default... 
" >&6; } +if ${ax_cv_cxx_compile_cxx11+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + template + struct check + { + static_assert(sizeof(int) <= sizeof(T), "not big enough"); + }; + + + struct in_class_initialization { + int i = 0; + }; + + typedef check> right_angle_brackets; + + int a; + decltype(a) b; + + typedef check check_type; + check_type c; + check_type&& cr = static_cast(c); + + void * null_pointer_keyword = nullptr; + +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + ax_cv_cxx_compile_cxx11=yes +else + ax_cv_cxx_compile_cxx11=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_cxx_compile_cxx11" >&5 +$as_echo "$ax_cv_cxx_compile_cxx11" >&6; } + if test x$ax_cv_cxx_compile_cxx11 = xyes; then + ac_success=yes + fi + + + + if test x$ac_success = xno; then + for switch in -std=c++11 -std=c++0x; do + cachevar=`$as_echo "ax_cv_cxx_compile_cxx11_$switch" | $as_tr_sh` + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CXX supports C++11 features with $switch" >&5 +$as_echo_n "checking whether $CXX supports C++11 features with $switch... " >&6; } +if eval \${$cachevar+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_save_CXXFLAGS="$CXXFLAGS" + CXXFLAGS="$CXXFLAGS $switch" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + + template + struct check + { + static_assert(sizeof(int) <= sizeof(T), "not big enough"); + }; + + + struct in_class_initialization { + int i = 0; + }; + + typedef check> right_angle_brackets; + + int a; + decltype(a) b; + + typedef check check_type; + check_type c; + check_type&& cr = static_cast(c); + + void * null_pointer_keyword = nullptr; + +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + eval $cachevar=yes +else + eval $cachevar=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + CXXFLAGS="$ac_save_CXXFLAGS" +fi +eval ac_res=\$$cachevar + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } + if eval test x\$$cachevar = xyes; then + CXXFLAGS="$CXXFLAGS $switch" + ac_success=yes + break + fi + done + fi + + if test x$ac_success = xno ; then + + enable_cxx_11_support=no + { $as_echo "$as_me:${as_lineno-$LINENO}: disabling features that depend on C++11 support" >&5 +$as_echo "$as_me: disabling features that depend on C++11 support" >&6;} + + else + enable_cxx_11_support=yes + fi + + + if test "x${enable_cxx_11_support}" = "xyes" ; then + BUILD_HAVE_CXX_11_TRUE= + BUILD_HAVE_CXX_11_FALSE='#' +else + BUILD_HAVE_CXX_11_TRUE='#' + BUILD_HAVE_CXX_11_FALSE= +fi + +if test "x${enable_cxx_11_support}" = "xyes" ; then + +$as_echo "#define HAVE_CXX_11 1" >>confdefs.h + +fi + +# While we are in C++ mode, check for the GNU hash_map extension. + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5 +$as_echo_n "checking for grep that handles long lines and -e... " >&6; } +if ${ac_cv_path_GREP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -z "$GREP"; then + ac_path_GREP_found=false + # Loop through the user's path and test for each of PROGNAME-LIST + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_prog in grep ggrep; do + for ac_exec_ext in '' $ac_executable_extensions; do + ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext" + as_fn_executable_p "$ac_path_GREP" || continue +# Check for GNU ac_path_GREP and select it if it is found. + # Check for GNU $ac_path_GREP +case `"$ac_path_GREP" --version 2>&1` in +*GNU*) + ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;; +*) + ac_count=0 + $as_echo_n 0123456789 >"conftest.in" + while : + do + cat "conftest.in" "conftest.in" >"conftest.tmp" + mv "conftest.tmp" "conftest.in" + cp "conftest.in" "conftest.nl" + $as_echo 'GREP' >> "conftest.nl" + "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break + diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break + as_fn_arith $ac_count + 1 && ac_count=$as_val + if test $ac_count -gt ${ac_path_GREP_max-0}; then + # Best one so far, save it but keep looking for a better one + ac_cv_path_GREP="$ac_path_GREP" + ac_path_GREP_max=$ac_count + fi + # 10*(2^10) chars as input seems more than enough + test $ac_count -gt 10 && break + done + rm -f conftest.in conftest.tmp conftest.nl conftest.out;; +esac + + $ac_path_GREP_found && break 3 + done + done + done +IFS=$as_save_IFS + if test -z "$ac_cv_path_GREP"; then + as_fn_error $? "no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 + fi +else + ac_cv_path_GREP=$GREP +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_GREP" >&5 +$as_echo "$ac_cv_path_GREP" >&6; } + GREP="$ac_cv_path_GREP" + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5 +$as_echo_n "checking for egrep... 
" >&6; } +if ${ac_cv_path_EGREP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if echo a | $GREP -E '(a|b)' >/dev/null 2>&1 + then ac_cv_path_EGREP="$GREP -E" + else + if test -z "$EGREP"; then + ac_path_EGREP_found=false + # Loop through the user's path and test for each of PROGNAME-LIST + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_prog in egrep; do + for ac_exec_ext in '' $ac_executable_extensions; do + ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext" + as_fn_executable_p "$ac_path_EGREP" || continue +# Check for GNU ac_path_EGREP and select it if it is found. + # Check for GNU $ac_path_EGREP +case `"$ac_path_EGREP" --version 2>&1` in +*GNU*) + ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;; +*) + ac_count=0 + $as_echo_n 0123456789 >"conftest.in" + while : + do + cat "conftest.in" "conftest.in" >"conftest.tmp" + mv "conftest.tmp" "conftest.in" + cp "conftest.in" "conftest.nl" + $as_echo 'EGREP' >> "conftest.nl" + "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break + diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break + as_fn_arith $ac_count + 1 && ac_count=$as_val + if test $ac_count -gt ${ac_path_EGREP_max-0}; then + # Best one so far, save it but keep looking for a better one + ac_cv_path_EGREP="$ac_path_EGREP" + ac_path_EGREP_max=$ac_count + fi + # 10*(2^10) chars as input seems more than enough + test $ac_count -gt 10 && break + done + rm -f conftest.in conftest.tmp conftest.nl conftest.out;; +esac + + $ac_path_EGREP_found && break 3 + done + done + done +IFS=$as_save_IFS + if test -z "$ac_cv_path_EGREP"; then + as_fn_error $? 
"no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 + fi +else + ac_cv_path_EGREP=$EGREP +fi + + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_EGREP" >&5 +$as_echo "$ac_cv_path_EGREP" >&6; } + EGREP="$ac_cv_path_EGREP" + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5 +$as_echo_n "checking for ANSI C header files... " >&6; } +if ${ac_cv_header_stdc+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +#include +#include +#include + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + ac_cv_header_stdc=yes +else + ac_cv_header_stdc=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + +if test $ac_cv_header_stdc = yes; then + # SunOS 4.x string.h does not declare mem*, contrary to ANSI. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include + +_ACEOF +if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | + $EGREP "memchr" >/dev/null 2>&1; then : + +else + ac_cv_header_stdc=no +fi +rm -f conftest* + +fi + +if test $ac_cv_header_stdc = yes; then + # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include + +_ACEOF +if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | + $EGREP "free" >/dev/null 2>&1; then : + +else + ac_cv_header_stdc=no +fi +rm -f conftest* + +fi + +if test $ac_cv_header_stdc = yes; then + # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi. + if test "$cross_compiling" = yes; then : + : +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +#include +#if ((' ' & 0x0FF) == 0x020) +# define ISLOWER(c) ('a' <= (c) && (c) <= 'z') +# define TOUPPER(c) (ISLOWER(c) ? 
'A' + ((c) - 'a') : (c)) +#else +# define ISLOWER(c) \ + (('a' <= (c) && (c) <= 'i') \ + || ('j' <= (c) && (c) <= 'r') \ + || ('s' <= (c) && (c) <= 'z')) +# define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c)) +#endif + +#define XOR(e, f) (((e) && !(f)) || (!(e) && (f))) +int +main () +{ + int i; + for (i = 0; i < 256; i++) + if (XOR (islower (i), ISLOWER (i)) + || toupper (i) != TOUPPER (i)) + return 2; + return 0; +} +_ACEOF +if ac_fn_cxx_try_run "$LINENO"; then : + +else + ac_cv_header_stdc=no +fi +rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ + conftest.$ac_objext conftest.beam conftest.$ac_ext +fi + +fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5 +$as_echo "$ac_cv_header_stdc" >&6; } +if test $ac_cv_header_stdc = yes; then + +$as_echo "#define STDC_HEADERS 1" >>confdefs.h + +fi + +# On IRIX 5.3, sys/types and inttypes.h are conflicting. +for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \ + inttypes.h stdint.h unistd.h +do : + as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` +ac_fn_cxx_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default +" +if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 +_ACEOF + +fi + +done + + + +for ac_header in unordered_map unordered_set +do : + as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` +ac_fn_cxx_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default" +if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 +_ACEOF + +fi + +done + +for tsc_i in unordered_map unordered_set +do + ac_safe=`echo "$tsc_i" | sed 'y%./+-%__p_%'` + tsc_2=`echo "$tsc_i" | sed -e 's%/%_%g' -e 's/\.//g' -e 's/-//g'` + if eval "test \"`echo '$ac_cv_header_'$ac_safe`\" = yes"; then + eval "$tsc_2=1" + else + eval "$tsc_2=0" + fi 
+done + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for " >&5 +$as_echo_n "checking for ... " >&6; } +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + + #include + #include + +int +main () +{ + + __gnu_cxx::hash_map map; + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + have_gnu_cxx_hash_map=yes +else + have_gnu_cxx_hash_map=no + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $have_gnu_cxx_hash_map" >&5 +$as_echo "$have_gnu_cxx_hash_map" >&6; } +if test "x${have_gnu_cxx_hash_map}" = "xyes" ; then + +$as_echo "#define HAVE_GNU_CXX_HASH_MAP 1" >>confdefs.h + +fi + +# And check for std::shared_ptr vs std::tr1::shared_ptr + +for ac_header in unordered_map unordered_set +do : + as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` +ac_fn_cxx_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default" +if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 +_ACEOF + +fi + +done + +for tsc_i in unordered_map unordered_set +do + ac_safe=`echo "$tsc_i" | sed 'y%./+-%__p_%'` + tsc_2=`echo "$tsc_i" | sed -e 's%/%_%g' -e 's/\.//g' -e 's/-//g'` + if eval "test \"`echo '$ac_cv_header_'$ac_safe`\" = yes"; then + eval "$tsc_2=1" + else + eval "$tsc_2=0" + fi +done + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for std::shared_ptr" >&5 +$as_echo_n "checking for std::shared_ptr... " >&6; } +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + + + #include + using std::shared_ptr; + +int +main () +{ + + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + have_std_shared_ptr=yes +else + have_std_shared_ptr=no + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $have_std_shared_ptr" >&5 +$as_echo "$have_std_shared_ptr" >&6; } +if test "x${have_std_shared_ptr}" = "xyes" ; then + +$as_echo "#define HAVE_STD_SHARED_PTR 1" >>confdefs.h + +fi + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + + + + +# Various OS specific setup. Note that on Solaris, 32-bit is always the +# default, even on a box that with 64-bit architecture. +# This also sets up a "normalized" variable and define $host_os_def. +defer_accept=1 +case $host_os in + linux*) + host_os_def="linux" + defer_accept=45 + EXTRA_CXX_LDFLAGS="-rdynamic" + ;; + darwin*) + host_os_def="darwin" + ;; + freebsd*) + host_os_def="freebsd" + EXTRA_CXX_LDFLAGS="-rdynamic" + + if test "x$CPPFLAGS" = "x"; then + test "x$verbose" = "xyes" && echo " setting CPPFLAGS to \"-I/usr/local/include\"" + CPPFLAGS="-I/usr/local/include" + else + ats_addto_bugger="-I/usr/local/include" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in $CPPFLAGS; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to CPPFLAGS" + CPPFLAGS="$CPPFLAGS $i" + fi + done + fi + + ;; + kfreebsd*) + host_os_def="freebsd" + EXTRA_CXX_LDFLAGS="-rdynamic" + + if test "x$CPPFLAGS" = "x"; then + test "x$verbose" = "xyes" && echo " setting CPPFLAGS to \"-I/usr/local/include\"" + CPPFLAGS="-I/usr/local/include" + else + ats_addto_bugger="-I/usr/local/include" + for i in $ats_addto_bugger; do + 
ats_addto_duplicate="0" + for j in $CPPFLAGS; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to CPPFLAGS" + CPPFLAGS="$CPPFLAGS $i" + fi + done + fi + + + if test "x$CPPFLAGS" = "x"; then + test "x$verbose" = "xyes" && echo " setting CPPFLAGS to \"-Dkfreebsd\"" + CPPFLAGS="-Dkfreebsd" + else + ats_addto_bugger="-Dkfreebsd" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in $CPPFLAGS; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to CPPFLAGS" + CPPFLAGS="$CPPFLAGS $i" + fi + done + fi + + ;; + openbsd*) + host_os_def="openbsd" + ;; + solaris*) + host_os_def="solaris" + case "`isalist`" in + *amd64*) + + if test "x$CFLAGS" = "x"; then + test "x$verbose" = "xyes" && echo " setting CFLAGS to \"-m64\"" + CFLAGS="-m64" + else + ats_addto_bugger="-m64" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in $CFLAGS; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to CFLAGS" + CFLAGS="$CFLAGS $i" + fi + done + fi + + + if test "x$CXXFLAGS" = "x"; then + test "x$verbose" = "xyes" && echo " setting CXXFLAGS to \"-m64\"" + CXXFLAGS="-m64" + else + ats_addto_bugger="-m64" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in $CXXFLAGS; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to CXXFLAGS" + CXXFLAGS="$CXXFLAGS $i" + fi + done + fi + + + if test "x$LUA_LDFLAGS" = "x"; then + test "x$verbose" = "xyes" && echo " setting LUA_LDFLAGS to \"-m64\"" + LUA_LDFLAGS="-m64" + else + ats_addto_bugger="-m64" + for i in $ats_addto_bugger; 
do + ats_addto_duplicate="0" + for j in $LUA_LDFLAGS; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to LUA_LDFLAGS" + LUA_LDFLAGS="$LUA_LDFLAGS $i" + fi + done + fi + + ;; + esac + ;; + *) + EXTRA_CXX_LDFLAGS="-rdynamic" + host_os_def=unknown + ;; +esac + + + if test "x$CPPFLAGS" = "x"; then + test "x$verbose" = "xyes" && echo " setting CPPFLAGS to \"-D$host_os_def\"" + CPPFLAGS="-D$host_os_def" + else + ats_addto_bugger="-D$host_os_def" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in $CPPFLAGS; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to CPPFLAGS" + CPPFLAGS="$CPPFLAGS $i" + fi + done + fi + + + + +if test -n "$ac_tool_prefix"; then + for ac_prog in ar lib "link -lib" + do + # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. +set dummy $ac_tool_prefix$ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_AR+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$AR"; then + ac_cv_prog_AR="$AR" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_AR="$ac_tool_prefix$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +AR=$ac_cv_prog_AR +if test -n "$AR"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AR" >&5 +$as_echo "$AR" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$AR" && break + done +fi +if test -z "$AR"; then + ac_ct_AR=$AR + for ac_prog in ar lib "link -lib" +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_AR+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_AR"; then + ac_cv_prog_ac_ct_AR="$ac_ct_AR" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_AR="$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_AR=$ac_cv_prog_ac_ct_AR +if test -n "$ac_ct_AR"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_AR" >&5 +$as_echo "$ac_ct_AR" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$ac_ct_AR" && break +done + + if test "x$ac_ct_AR" = x; then + AR="false" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + AR=$ac_ct_AR + fi +fi + +: ${AR=ar} + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking the archiver ($AR) interface" >&5 +$as_echo_n "checking the archiver ($AR) interface... " >&6; } +if ${am_cv_ar_interface+:} false; then : + $as_echo_n "(cached) " >&6 +else + am_cv_ar_interface=ar + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +int some_variable = 0; +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + am_ar_try='$AR cru libconftest.a conftest.$ac_objext >&5' + { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$am_ar_try\""; } >&5 + (eval $am_ar_try) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } + if test "$ac_status" -eq 0; then + am_cv_ar_interface=ar + else + am_ar_try='$AR -NOLOGO -OUT:conftest.lib conftest.$ac_objext >&5' + { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$am_ar_try\""; } >&5 + (eval $am_ar_try) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 + test $ac_status = 0; } + if test "$ac_status" -eq 0; then + am_cv_ar_interface=lib + else + am_cv_ar_interface=unknown + fi + fi + rm -f conftest.lib libconftest.a + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_ar_interface" >&5 +$as_echo "$am_cv_ar_interface" >&6; } + +case $am_cv_ar_interface in +ar) + ;; +lib) + # Microsoft lib, so override with the ar-lib wrapper script. + # FIXME: It is wrong to rewrite AR. + # But if we don't then we get into trouble of one sort or another. + # A longer-term fix would be to have automake use am__AR in this case, + # and then we could set am__AR="$am_aux_dir/ar-lib \$(AR)" or something + # similar. + AR="$am_aux_dir/ar-lib $AR" + ;; +unknown) + as_fn_error $? "could not determine $AR interface" "$LINENO" 5 + ;; +esac + + +for ac_prog in gawk mawk nawk awk +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_AWK+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$AWK"; then + ac_cv_prog_AWK="$AWK" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_AWK="$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +AWK=$ac_cv_prog_AWK +if test -n "$AWK"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AWK" >&5 +$as_echo "$AWK" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$AWK" && break +done + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a sed that does not truncate output" >&5 +$as_echo_n "checking for a sed that does not truncate output... " >&6; } +if ${ac_cv_path_SED+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_script=s/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb/ + for ac_i in 1 2 3 4 5 6 7; do + ac_script="$ac_script$as_nl$ac_script" + done + echo "$ac_script" 2>/dev/null | sed 99q >conftest.sed + { ac_script=; unset ac_script;} + if test -z "$SED"; then + ac_path_SED_found=false + # Loop through the user's path and test for each of PROGNAME-LIST + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_prog in sed gsed; do + for ac_exec_ext in '' $ac_executable_extensions; do + ac_path_SED="$as_dir/$ac_prog$ac_exec_ext" + as_fn_executable_p "$ac_path_SED" || continue +# Check for GNU ac_path_SED and select it if it is found. 
+ # Check for GNU $ac_path_SED +case `"$ac_path_SED" --version 2>&1` in +*GNU*) + ac_cv_path_SED="$ac_path_SED" ac_path_SED_found=:;; +*) + ac_count=0 + $as_echo_n 0123456789 >"conftest.in" + while : + do + cat "conftest.in" "conftest.in" >"conftest.tmp" + mv "conftest.tmp" "conftest.in" + cp "conftest.in" "conftest.nl" + $as_echo '' >> "conftest.nl" + "$ac_path_SED" -f conftest.sed < "conftest.nl" >"conftest.out" 2>/dev/null || break + diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break + as_fn_arith $ac_count + 1 && ac_count=$as_val + if test $ac_count -gt ${ac_path_SED_max-0}; then + # Best one so far, save it but keep looking for a better one + ac_cv_path_SED="$ac_path_SED" + ac_path_SED_max=$ac_count + fi + # 10*(2^10) chars as input seems more than enough + test $ac_count -gt 10 && break + done + rm -f conftest.in conftest.tmp conftest.nl conftest.out;; +esac + + $ac_path_SED_found && break 3 + done + done + done +IFS=$as_save_IFS + if test -z "$ac_cv_path_SED"; then + as_fn_error $? "no acceptable sed could be found in \$PATH" "$LINENO" 5 + fi +else + ac_cv_path_SED=$SED +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_SED" >&5 +$as_echo "$ac_cv_path_SED" >&6; } + SED="$ac_cv_path_SED" + rm -f conftest.sed + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ln -s works" >&5 +$as_echo_n "checking whether ln -s works... 
" >&6; } +LN_S=$as_ln_s +if test "$LN_S" = "ln -s"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no, using $LN_S" >&5 +$as_echo "no, using $LN_S" >&6; } +fi + + +case `pwd` in + *\ * | *\ *) + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Libtool does not cope well with whitespace in \`pwd\`" >&5 +$as_echo "$as_me: WARNING: Libtool does not cope well with whitespace in \`pwd\`" >&2;} ;; +esac + + + +macro_version='2.4.2' +macro_revision='1.3337' + + + + + + + + + + + + + +ltmain="$ac_aux_dir/ltmain.sh" + +# Backslashify metacharacters that are still active within +# double-quoted strings. +sed_quote_subst='s/\(["`$\\]\)/\\\1/g' + +# Same as above, but do not quote variable references. +double_quote_subst='s/\(["`\\]\)/\\\1/g' + +# Sed substitution to delay expansion of an escaped shell variable in a +# double_quote_subst'ed string. +delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g' + +# Sed substitution to delay expansion of an escaped single quote. +delay_single_quote_subst='s/'\''/'\'\\\\\\\'\''/g' + +# Sed substitution to avoid accidental globbing in evaled expressions +no_glob_subst='s/\*/\\\*/g' + +ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' +ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO +ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to print strings" >&5 +$as_echo_n "checking how to print strings... " >&6; } +# Test print first, because it will be a builtin if present. +if test "X`( print -r -- -n ) 2>/dev/null`" = X-n && \ + test "X`print -r -- $ECHO 2>/dev/null`" = "X$ECHO"; then + ECHO='print -r --' +elif test "X`printf %s $ECHO 2>/dev/null`" = "X$ECHO"; then + ECHO='printf %s\n' +else + # Use this function as a fallback that always works. 
+ func_fallback_echo () + { + eval 'cat <<_LTECHO_EOF +$1 +_LTECHO_EOF' + } + ECHO='func_fallback_echo' +fi + +# func_echo_all arg... +# Invoke $ECHO with all args, space-separated. +func_echo_all () +{ + $ECHO "" +} + +case "$ECHO" in + printf*) { $as_echo "$as_me:${as_lineno-$LINENO}: result: printf" >&5 +$as_echo "printf" >&6; } ;; + print*) { $as_echo "$as_me:${as_lineno-$LINENO}: result: print -r" >&5 +$as_echo "print -r" >&6; } ;; + *) { $as_echo "$as_me:${as_lineno-$LINENO}: result: cat" >&5 +$as_echo "cat" >&6; } ;; +esac + + + + + + + + + + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a sed that does not truncate output" >&5 +$as_echo_n "checking for a sed that does not truncate output... " >&6; } +if ${ac_cv_path_SED+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_script=s/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb/ + for ac_i in 1 2 3 4 5 6 7; do + ac_script="$ac_script$as_nl$ac_script" + done + echo "$ac_script" 2>/dev/null | sed 99q >conftest.sed + { ac_script=; unset ac_script;} + if test -z "$SED"; then + ac_path_SED_found=false + # Loop through the user's path and test for each of PROGNAME-LIST + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_prog in sed gsed; do + for ac_exec_ext in '' $ac_executable_extensions; do + ac_path_SED="$as_dir/$ac_prog$ac_exec_ext" + as_fn_executable_p "$ac_path_SED" || continue +# Check for GNU ac_path_SED and select it if it is found. 
+ # Check for GNU $ac_path_SED +case `"$ac_path_SED" --version 2>&1` in +*GNU*) + ac_cv_path_SED="$ac_path_SED" ac_path_SED_found=:;; +*) + ac_count=0 + $as_echo_n 0123456789 >"conftest.in" + while : + do + cat "conftest.in" "conftest.in" >"conftest.tmp" + mv "conftest.tmp" "conftest.in" + cp "conftest.in" "conftest.nl" + $as_echo '' >> "conftest.nl" + "$ac_path_SED" -f conftest.sed < "conftest.nl" >"conftest.out" 2>/dev/null || break + diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break + as_fn_arith $ac_count + 1 && ac_count=$as_val + if test $ac_count -gt ${ac_path_SED_max-0}; then + # Best one so far, save it but keep looking for a better one + ac_cv_path_SED="$ac_path_SED" + ac_path_SED_max=$ac_count + fi + # 10*(2^10) chars as input seems more than enough + test $ac_count -gt 10 && break + done + rm -f conftest.in conftest.tmp conftest.nl conftest.out;; +esac + + $ac_path_SED_found && break 3 + done + done + done +IFS=$as_save_IFS + if test -z "$ac_cv_path_SED"; then + as_fn_error $? "no acceptable sed could be found in \$PATH" "$LINENO" 5 + fi +else + ac_cv_path_SED=$SED +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_SED" >&5 +$as_echo "$ac_cv_path_SED" >&6; } + SED="$ac_cv_path_SED" + rm -f conftest.sed + +test -z "$SED" && SED=sed +Xsed="$SED -e 1s/^X//" + + + + + + + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for fgrep" >&5 +$as_echo_n "checking for fgrep... " >&6; } +if ${ac_cv_path_FGREP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if echo 'ab*c' | $GREP -F 'ab*c' >/dev/null 2>&1 + then ac_cv_path_FGREP="$GREP -F" + else + if test -z "$FGREP"; then + ac_path_FGREP_found=false + # Loop through the user's path and test for each of PROGNAME-LIST + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_prog in fgrep; do + for ac_exec_ext in '' $ac_executable_extensions; do + ac_path_FGREP="$as_dir/$ac_prog$ac_exec_ext" + as_fn_executable_p "$ac_path_FGREP" || continue +# Check for GNU ac_path_FGREP and select it if it is found. + # Check for GNU $ac_path_FGREP +case `"$ac_path_FGREP" --version 2>&1` in +*GNU*) + ac_cv_path_FGREP="$ac_path_FGREP" ac_path_FGREP_found=:;; +*) + ac_count=0 + $as_echo_n 0123456789 >"conftest.in" + while : + do + cat "conftest.in" "conftest.in" >"conftest.tmp" + mv "conftest.tmp" "conftest.in" + cp "conftest.in" "conftest.nl" + $as_echo 'FGREP' >> "conftest.nl" + "$ac_path_FGREP" FGREP < "conftest.nl" >"conftest.out" 2>/dev/null || break + diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break + as_fn_arith $ac_count + 1 && ac_count=$as_val + if test $ac_count -gt ${ac_path_FGREP_max-0}; then + # Best one so far, save it but keep looking for a better one + ac_cv_path_FGREP="$ac_path_FGREP" + ac_path_FGREP_max=$ac_count + fi + # 10*(2^10) chars as input seems more than enough + test $ac_count -gt 10 && break + done + rm -f conftest.in conftest.tmp conftest.nl conftest.out;; +esac + + $ac_path_FGREP_found && break 3 + done + done + done +IFS=$as_save_IFS + if test -z "$ac_cv_path_FGREP"; then + as_fn_error $? "no acceptable fgrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 + fi +else + ac_cv_path_FGREP=$FGREP +fi + + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_FGREP" >&5 +$as_echo "$ac_cv_path_FGREP" >&6; } + FGREP="$ac_cv_path_FGREP" + + +test -z "$GREP" && GREP=grep + + + + + + + + + + + + + + + + + + + +# Check whether --with-gnu-ld was given. +if test "${with_gnu_ld+set}" = set; then : + withval=$with_gnu_ld; test "$withval" = no || with_gnu_ld=yes +else + with_gnu_ld=no +fi + +ac_prog=ld +if test "$GCC" = yes; then + # Check if gcc -print-prog-name=ld gives a path. 
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ld used by $CC" >&5 +$as_echo_n "checking for ld used by $CC... " >&6; } + case $host in + *-*-mingw*) + # gcc leaves a trailing carriage return which upsets mingw + ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; + *) + ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; + esac + case $ac_prog in + # Accept absolute paths. + [\\/]* | ?:[\\/]*) + re_direlt='/[^/][^/]*/\.\./' + # Canonicalize the pathname of ld + ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'` + while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do + ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"` + done + test -z "$LD" && LD="$ac_prog" + ;; + "") + # If it fails, then pretend we aren't using GCC. + ac_prog=ld + ;; + *) + # If it is relative, then search for the first ld in PATH. + with_gnu_ld=unknown + ;; + esac +elif test "$with_gnu_ld" = yes; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU ld" >&5 +$as_echo_n "checking for GNU ld... " >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for non-GNU ld" >&5 +$as_echo_n "checking for non-GNU ld... " >&6; } +fi +if ${lt_cv_path_LD+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -z "$LD"; then + lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR + for ac_dir in $PATH; do + IFS="$lt_save_ifs" + test -z "$ac_dir" && ac_dir=. + if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then + lt_cv_path_LD="$ac_dir/$ac_prog" + # Check to see if the program is GNU ld. I'd rather use --version, + # but apparently some variants of GNU ld only accept -v. + # Break only if it was the GNU/non-GNU ld that we prefer. + case `"$lt_cv_path_LD" -v 2>&1 &5 +$as_echo "$LD" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi +test -z "$LD" && as_fn_error $? 
"no acceptable ld found in \$PATH" "$LINENO" 5 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if the linker ($LD) is GNU ld" >&5 +$as_echo_n "checking if the linker ($LD) is GNU ld... " >&6; } +if ${lt_cv_prog_gnu_ld+:} false; then : + $as_echo_n "(cached) " >&6 +else + # I'd rather use --version here, but apparently some GNU lds only accept -v. +case `$LD -v 2>&1 &5 +$as_echo "$lt_cv_prog_gnu_ld" >&6; } +with_gnu_ld=$lt_cv_prog_gnu_ld + + + + + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for BSD- or MS-compatible name lister (nm)" >&5 +$as_echo_n "checking for BSD- or MS-compatible name lister (nm)... " >&6; } +if ${lt_cv_path_NM+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$NM"; then + # Let the user override the test. + lt_cv_path_NM="$NM" +else + lt_nm_to_check="${ac_tool_prefix}nm" + if test -n "$ac_tool_prefix" && test "$build" = "$host"; then + lt_nm_to_check="$lt_nm_to_check nm" + fi + for lt_tmp_nm in $lt_nm_to_check; do + lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR + for ac_dir in $PATH /usr/ccs/bin/elf /usr/ccs/bin /usr/ucb /bin; do + IFS="$lt_save_ifs" + test -z "$ac_dir" && ac_dir=. + tmp_nm="$ac_dir/$lt_tmp_nm" + if test -f "$tmp_nm" || test -f "$tmp_nm$ac_exeext" ; then + # Check to see if the nm accepts a BSD-compat flag. 
+ # Adding the `sed 1q' prevents false positives on HP-UX, which says: + # nm: unknown option "B" ignored + # Tru64's nm complains that /dev/null is an invalid object file + case `"$tmp_nm" -B /dev/null 2>&1 | sed '1q'` in + */dev/null* | *'Invalid file or object type'*) + lt_cv_path_NM="$tmp_nm -B" + break + ;; + *) + case `"$tmp_nm" -p /dev/null 2>&1 | sed '1q'` in + */dev/null*) + lt_cv_path_NM="$tmp_nm -p" + break + ;; + *) + lt_cv_path_NM=${lt_cv_path_NM="$tmp_nm"} # keep the first match, but + continue # so that we can try to find one that supports BSD flags + ;; + esac + ;; + esac + fi + done + IFS="$lt_save_ifs" + done + : ${lt_cv_path_NM=no} +fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_NM" >&5 +$as_echo "$lt_cv_path_NM" >&6; } +if test "$lt_cv_path_NM" != "no"; then + NM="$lt_cv_path_NM" +else + # Didn't find any BSD compatible name lister, look for dumpbin. + if test -n "$DUMPBIN"; then : + # Let the user override the test. + else + if test -n "$ac_tool_prefix"; then + for ac_prog in dumpbin "link -dump" + do + # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. +set dummy $ac_tool_prefix$ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_DUMPBIN+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$DUMPBIN"; then + ac_cv_prog_DUMPBIN="$DUMPBIN" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_DUMPBIN="$ac_tool_prefix$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +DUMPBIN=$ac_cv_prog_DUMPBIN +if test -n "$DUMPBIN"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DUMPBIN" >&5 +$as_echo "$DUMPBIN" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$DUMPBIN" && break + done +fi +if test -z "$DUMPBIN"; then + ac_ct_DUMPBIN=$DUMPBIN + for ac_prog in dumpbin "link -dump" +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_DUMPBIN+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_DUMPBIN"; then + ac_cv_prog_ac_ct_DUMPBIN="$ac_ct_DUMPBIN" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_DUMPBIN="$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_DUMPBIN=$ac_cv_prog_ac_ct_DUMPBIN +if test -n "$ac_ct_DUMPBIN"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DUMPBIN" >&5 +$as_echo "$ac_ct_DUMPBIN" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$ac_ct_DUMPBIN" && break +done + + if test "x$ac_ct_DUMPBIN" = x; then + DUMPBIN=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + DUMPBIN=$ac_ct_DUMPBIN + fi +fi + + case `$DUMPBIN -symbols /dev/null 2>&1 | sed '1q'` in + *COFF*) + DUMPBIN="$DUMPBIN -symbols" + ;; + *) + DUMPBIN=: + ;; + esac + fi + + if test "$DUMPBIN" != ":"; then + NM="$DUMPBIN" + fi +fi +test -z "$NM" && NM=nm + + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking the name lister ($NM) interface" >&5 +$as_echo_n "checking the name lister ($NM) interface... 
" >&6; } +if ${lt_cv_nm_interface+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_nm_interface="BSD nm" + echo "int some_variable = 0;" > conftest.$ac_ext + (eval echo "\"\$as_me:$LINENO: $ac_compile\"" >&5) + (eval "$ac_compile" 2>conftest.err) + cat conftest.err >&5 + (eval echo "\"\$as_me:$LINENO: $NM \\\"conftest.$ac_objext\\\"\"" >&5) + (eval "$NM \"conftest.$ac_objext\"" 2>conftest.err > conftest.out) + cat conftest.err >&5 + (eval echo "\"\$as_me:$LINENO: output\"" >&5) + cat conftest.out >&5 + if $GREP 'External.*some_variable' conftest.out > /dev/null; then + lt_cv_nm_interface="MS dumpbin" + fi + rm -f conftest* +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_nm_interface" >&5 +$as_echo "$lt_cv_nm_interface" >&6; } + +# find the maximum length of command line arguments +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking the maximum length of command line arguments" >&5 +$as_echo_n "checking the maximum length of command line arguments... " >&6; } +if ${lt_cv_sys_max_cmd_len+:} false; then : + $as_echo_n "(cached) " >&6 +else + i=0 + teststring="ABCD" + + case $build_os in + msdosdjgpp*) + # On DJGPP, this test can blow up pretty badly due to problems in libc + # (any single argument exceeding 2000 bytes causes a buffer overrun + # during glob expansion). Even if it were fixed, the result of this + # check would be larger than it should be. + lt_cv_sys_max_cmd_len=12288; # 12K is about right + ;; + + gnu*) + # Under GNU Hurd, this test is not required because there is + # no limit to the length of command line arguments. + # Libtool will interpret -1 as no limit whatsoever + lt_cv_sys_max_cmd_len=-1; + ;; + + cygwin* | mingw* | cegcc*) + # On Win9x/ME, this test blows up -- it succeeds, but takes + # about 5 minutes as the teststring grows exponentially. 
+ # Worse, since 9x/ME are not pre-emptively multitasking, + # you end up with a "frozen" computer, even though with patience + # the test eventually succeeds (with a max line length of 256k). + # Instead, let's just punt: use the minimum linelength reported by + # all of the supported platforms: 8192 (on NT/2K/XP). + lt_cv_sys_max_cmd_len=8192; + ;; + + mint*) + # On MiNT this can take a long time and run out of memory. + lt_cv_sys_max_cmd_len=8192; + ;; + + amigaos*) + # On AmigaOS with pdksh, this test takes hours, literally. + # So we just punt and use a minimum line length of 8192. + lt_cv_sys_max_cmd_len=8192; + ;; + + netbsd* | freebsd* | openbsd* | darwin* | dragonfly*) + # This has been around since 386BSD, at least. Likely further. + if test -x /sbin/sysctl; then + lt_cv_sys_max_cmd_len=`/sbin/sysctl -n kern.argmax` + elif test -x /usr/sbin/sysctl; then + lt_cv_sys_max_cmd_len=`/usr/sbin/sysctl -n kern.argmax` + else + lt_cv_sys_max_cmd_len=65536 # usable default for all BSDs + fi + # And add a safety zone + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` + ;; + + interix*) + # We know the value 262144 and hardcode it with a safety zone (like BSD) + lt_cv_sys_max_cmd_len=196608 + ;; + + os2*) + # The test takes a long time on OS/2. + lt_cv_sys_max_cmd_len=8192 + ;; + + osf*) + # Dr. Hans Ekkehard Plesser reports seeing a kernel panic running configure + # due to this test when exec_disable_arg_limit is 1 on Tru64. It is not + # nice to cause kernel panics so lets avoid the loop below. + # First set a reasonable default. 
+ lt_cv_sys_max_cmd_len=16384 + # + if test -x /sbin/sysconfig; then + case `/sbin/sysconfig -q proc exec_disable_arg_limit` in + *1*) lt_cv_sys_max_cmd_len=-1 ;; + esac + fi + ;; + sco3.2v5*) + lt_cv_sys_max_cmd_len=102400 + ;; + sysv5* | sco5v6* | sysv4.2uw2*) + kargmax=`grep ARG_MAX /etc/conf/cf.d/stune 2>/dev/null` + if test -n "$kargmax"; then + lt_cv_sys_max_cmd_len=`echo $kargmax | sed 's/.*[ ]//'` + else + lt_cv_sys_max_cmd_len=32768 + fi + ;; + *) + lt_cv_sys_max_cmd_len=`(getconf ARG_MAX) 2> /dev/null` + if test -n "$lt_cv_sys_max_cmd_len"; then + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` + else + # Make teststring a little bigger before we do anything with it. + # a 1K string should be a reasonable start. + for i in 1 2 3 4 5 6 7 8 ; do + teststring=$teststring$teststring + done + SHELL=${SHELL-${CONFIG_SHELL-/bin/sh}} + # If test is not a shell built-in, we'll probably end up computing a + # maximum length that is only half of the actual maximum length, but + # we can't tell. + while { test "X"`env echo "$teststring$teststring" 2>/dev/null` \ + = "X$teststring$teststring"; } >/dev/null 2>&1 && + test $i != 17 # 1/2 MB should be enough + do + i=`expr $i + 1` + teststring=$teststring$teststring + done + # Only check the string length outside the loop. + lt_cv_sys_max_cmd_len=`expr "X$teststring" : ".*" 2>&1` + teststring= + # Add a significant safety factor because C++ compilers can tack on + # massive amounts of additional arguments before passing them to the + # linker. It appears as though 1/2 is a usable value. 
+ lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 2` + fi + ;; + esac + +fi + +if test -n $lt_cv_sys_max_cmd_len ; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sys_max_cmd_len" >&5 +$as_echo "$lt_cv_sys_max_cmd_len" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: none" >&5 +$as_echo "none" >&6; } +fi +max_cmd_len=$lt_cv_sys_max_cmd_len + + + + + + +: ${CP="cp -f"} +: ${MV="mv -f"} +: ${RM="rm -f"} + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the shell understands some XSI constructs" >&5 +$as_echo_n "checking whether the shell understands some XSI constructs... " >&6; } +# Try some XSI features +xsi_shell=no +( _lt_dummy="a/b/c" + test "${_lt_dummy##*/},${_lt_dummy%/*},${_lt_dummy#??}"${_lt_dummy%"$_lt_dummy"}, \ + = c,a/b,b/c, \ + && eval 'test $(( 1 + 1 )) -eq 2 \ + && test "${#_lt_dummy}" -eq 5' ) >/dev/null 2>&1 \ + && xsi_shell=yes +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $xsi_shell" >&5 +$as_echo "$xsi_shell" >&6; } + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the shell understands \"+=\"" >&5 +$as_echo_n "checking whether the shell understands \"+=\"... 
" >&6; } +lt_shell_append=no +( foo=bar; set foo baz; eval "$1+=\$2" && test "$foo" = barbaz ) \ + >/dev/null 2>&1 \ + && lt_shell_append=yes +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_shell_append" >&5 +$as_echo "$lt_shell_append" >&6; } + + +if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then + lt_unset=unset +else + lt_unset=false +fi + + + + + +# test EBCDIC or ASCII +case `echo X|tr X '\101'` in + A) # ASCII based system + # \n is not interpreted correctly by Solaris 8 /usr/ucb/tr + lt_SP2NL='tr \040 \012' + lt_NL2SP='tr \015\012 \040\040' + ;; + *) # EBCDIC based system + lt_SP2NL='tr \100 \n' + lt_NL2SP='tr \r\n \100\100' + ;; +esac + + + + + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to $host format" >&5 +$as_echo_n "checking how to convert $build file names to $host format... " >&6; } +if ${lt_cv_to_host_file_cmd+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $host in + *-*-mingw* ) + case $build in + *-*-mingw* ) # actually msys + lt_cv_to_host_file_cmd=func_convert_file_msys_to_w32 + ;; + *-*-cygwin* ) + lt_cv_to_host_file_cmd=func_convert_file_cygwin_to_w32 + ;; + * ) # otherwise, assume *nix + lt_cv_to_host_file_cmd=func_convert_file_nix_to_w32 + ;; + esac + ;; + *-*-cygwin* ) + case $build in + *-*-mingw* ) # actually msys + lt_cv_to_host_file_cmd=func_convert_file_msys_to_cygwin + ;; + *-*-cygwin* ) + lt_cv_to_host_file_cmd=func_convert_file_noop + ;; + * ) # otherwise, assume *nix + lt_cv_to_host_file_cmd=func_convert_file_nix_to_cygwin + ;; + esac + ;; + * ) # unhandled hosts (and "normal" native builds) + lt_cv_to_host_file_cmd=func_convert_file_noop + ;; +esac + +fi + +to_host_file_cmd=$lt_cv_to_host_file_cmd +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_host_file_cmd" >&5 +$as_echo "$lt_cv_to_host_file_cmd" >&6; } + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to toolchain format" >&5 +$as_echo_n "checking 
how to convert $build file names to toolchain format... " >&6; } +if ${lt_cv_to_tool_file_cmd+:} false; then : + $as_echo_n "(cached) " >&6 +else + #assume ordinary cross tools, or native build. +lt_cv_to_tool_file_cmd=func_convert_file_noop +case $host in + *-*-mingw* ) + case $build in + *-*-mingw* ) # actually msys + lt_cv_to_tool_file_cmd=func_convert_file_msys_to_w32 + ;; + esac + ;; +esac + +fi + +to_tool_file_cmd=$lt_cv_to_tool_file_cmd +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_tool_file_cmd" >&5 +$as_echo "$lt_cv_to_tool_file_cmd" >&6; } + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $LD option to reload object files" >&5 +$as_echo_n "checking for $LD option to reload object files... " >&6; } +if ${lt_cv_ld_reload_flag+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_ld_reload_flag='-r' +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_reload_flag" >&5 +$as_echo "$lt_cv_ld_reload_flag" >&6; } +reload_flag=$lt_cv_ld_reload_flag +case $reload_flag in +"" | " "*) ;; +*) reload_flag=" $reload_flag" ;; +esac +reload_cmds='$LD$reload_flag -o $output$reload_objs' +case $host_os in + cygwin* | mingw* | pw32* | cegcc*) + if test "$GCC" != yes; then + reload_cmds=false + fi + ;; + darwin*) + if test "$GCC" = yes; then + reload_cmds='$LTCC $LTCFLAGS -nostdlib ${wl}-r -o $output$reload_objs' + else + reload_cmds='$LD$reload_flag -o $output$reload_objs' + fi + ;; +esac + + + + + + + + + +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}objdump", so it can be a program name with args. +set dummy ${ac_tool_prefix}objdump; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_OBJDUMP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$OBJDUMP"; then + ac_cv_prog_OBJDUMP="$OBJDUMP" # Let the user override the test. 
+else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_OBJDUMP="${ac_tool_prefix}objdump" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +OBJDUMP=$ac_cv_prog_OBJDUMP +if test -n "$OBJDUMP"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OBJDUMP" >&5 +$as_echo "$OBJDUMP" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_OBJDUMP"; then + ac_ct_OBJDUMP=$OBJDUMP + # Extract the first word of "objdump", so it can be a program name with args. +set dummy objdump; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_OBJDUMP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_OBJDUMP"; then + ac_cv_prog_ac_ct_OBJDUMP="$ac_ct_OBJDUMP" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_OBJDUMP="objdump" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_OBJDUMP=$ac_cv_prog_ac_ct_OBJDUMP +if test -n "$ac_ct_OBJDUMP"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OBJDUMP" >&5 +$as_echo "$ac_ct_OBJDUMP" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_OBJDUMP" = x; then + OBJDUMP="false" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + OBJDUMP=$ac_ct_OBJDUMP + fi +else + OBJDUMP="$ac_cv_prog_OBJDUMP" +fi + +test -z "$OBJDUMP" && OBJDUMP=objdump + + + + + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to recognize dependent libraries" >&5 +$as_echo_n "checking how to recognize dependent libraries... " >&6; } +if ${lt_cv_deplibs_check_method+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_file_magic_cmd='$MAGIC_CMD' +lt_cv_file_magic_test_file= +lt_cv_deplibs_check_method='unknown' +# Need to set the preceding variable on all platforms that support +# interlibrary dependencies. +# 'none' -- dependencies not supported. +# `unknown' -- same as none, but documents that we really don't know. +# 'pass_all' -- all dependencies passed with no checks. +# 'test_compile' -- check by making test program. +# 'file_magic [[regex]]' -- check by looking for files in library path +# which responds to the $file_magic_cmd with a given extended regex. +# If you have `file' or equivalent on your system and you're not sure +# whether `pass_all' will *always* work, you probably want this one. 
+ +case $host_os in +aix[4-9]*) + lt_cv_deplibs_check_method=pass_all + ;; + +beos*) + lt_cv_deplibs_check_method=pass_all + ;; + +bsdi[45]*) + lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib)' + lt_cv_file_magic_cmd='/usr/bin/file -L' + lt_cv_file_magic_test_file=/shlib/libc.so + ;; + +cygwin*) + # func_win32_libid is a shell function defined in ltmain.sh + lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' + lt_cv_file_magic_cmd='func_win32_libid' + ;; + +mingw* | pw32*) + # Base MSYS/MinGW do not provide the 'file' command needed by + # func_win32_libid shell function, so use a weaker test based on 'objdump', + # unless we find 'file', for example because we are cross-compiling. + # func_win32_libid assumes BSD nm, so disallow it if using MS dumpbin. + if ( test "$lt_cv_nm_interface" = "BSD nm" && file / ) >/dev/null 2>&1; then + lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' + lt_cv_file_magic_cmd='func_win32_libid' + else + # Keep this pattern in sync with the one in func_win32_libid. + lt_cv_deplibs_check_method='file_magic file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' + lt_cv_file_magic_cmd='$OBJDUMP -f' + fi + ;; + +cegcc*) + # use the weaker test based on 'objdump'. See mingw*. + lt_cv_deplibs_check_method='file_magic file format pe-arm-.*little(.*architecture: arm)?' + lt_cv_file_magic_cmd='$OBJDUMP -f' + ;; + +darwin* | rhapsody*) + lt_cv_deplibs_check_method=pass_all + ;; + +freebsd* | dragonfly*) + if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then + case $host_cpu in + i*86 ) + # Not sure whether the presence of OpenBSD here was a mistake. + # Let's accept both of them until this is cleared up. 
+ lt_cv_deplibs_check_method='file_magic (FreeBSD|OpenBSD|DragonFly)/i[3-9]86 (compact )?demand paged shared library' + lt_cv_file_magic_cmd=/usr/bin/file + lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*` + ;; + esac + else + lt_cv_deplibs_check_method=pass_all + fi + ;; + +gnu*) + lt_cv_deplibs_check_method=pass_all + ;; + +haiku*) + lt_cv_deplibs_check_method=pass_all + ;; + +hpux10.20* | hpux11*) + lt_cv_file_magic_cmd=/usr/bin/file + case $host_cpu in + ia64*) + lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF-[0-9][0-9]) shared object file - IA64' + lt_cv_file_magic_test_file=/usr/lib/hpux32/libc.so + ;; + hppa*64*) + lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF[ -][0-9][0-9])(-bit)?( [LM]SB)? shared object( file)?[, -]* PA-RISC [0-9]\.[0-9]' + lt_cv_file_magic_test_file=/usr/lib/pa20_64/libc.sl + ;; + *) + lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|PA-RISC[0-9]\.[0-9]) shared library' + lt_cv_file_magic_test_file=/usr/lib/libc.sl + ;; + esac + ;; + +interix[3-9]*) + # PIC code is broken on Interix 3.x, that's why |\.a not |_pic\.a here + lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so|\.a)$' + ;; + +irix5* | irix6* | nonstopux*) + case $LD in + *-32|*"-32 ") libmagic=32-bit;; + *-n32|*"-n32 ") libmagic=N32;; + *-64|*"-64 ") libmagic=64-bit;; + *) libmagic=never-match;; + esac + lt_cv_deplibs_check_method=pass_all + ;; + +# This must be glibc/ELF. 
+linux* | k*bsd*-gnu | kopensolaris*-gnu) + lt_cv_deplibs_check_method=pass_all + ;; + +netbsd*) + if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then + lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|_pic\.a)$' + else + lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so|_pic\.a)$' + fi + ;; + +newos6*) + lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (executable|dynamic lib)' + lt_cv_file_magic_cmd=/usr/bin/file + lt_cv_file_magic_test_file=/usr/lib/libnls.so + ;; + +*nto* | *qnx*) + lt_cv_deplibs_check_method=pass_all + ;; + +openbsd*) + if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|\.so|_pic\.a)$' + else + lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|_pic\.a)$' + fi + ;; + +osf3* | osf4* | osf5*) + lt_cv_deplibs_check_method=pass_all + ;; + +rdos*) + lt_cv_deplibs_check_method=pass_all + ;; + +solaris*) + lt_cv_deplibs_check_method=pass_all + ;; + +sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) + lt_cv_deplibs_check_method=pass_all + ;; + +sysv4 | sysv4.3*) + case $host_vendor in + motorola) + lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib) M[0-9][0-9]* Version [0-9]' + lt_cv_file_magic_test_file=`echo /usr/lib/libc.so*` + ;; + ncr) + lt_cv_deplibs_check_method=pass_all + ;; + sequent) + lt_cv_file_magic_cmd='/bin/file' + lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [LM]SB (shared object|dynamic lib )' + ;; + sni) + lt_cv_file_magic_cmd='/bin/file' + lt_cv_deplibs_check_method="file_magic ELF [0-9][0-9]*-bit [LM]SB dynamic lib" + lt_cv_file_magic_test_file=/lib/libc.so + ;; + siemens) + lt_cv_deplibs_check_method=pass_all + ;; + pc) + lt_cv_deplibs_check_method=pass_all + ;; + esac + ;; + +tpf*) + lt_cv_deplibs_check_method=pass_all + ;; +esac + +fi +{ 
$as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_deplibs_check_method" >&5 +$as_echo "$lt_cv_deplibs_check_method" >&6; } + +file_magic_glob= +want_nocaseglob=no +if test "$build" = "$host"; then + case $host_os in + mingw* | pw32*) + if ( shopt | grep nocaseglob ) >/dev/null 2>&1; then + want_nocaseglob=yes + else + file_magic_glob=`echo aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ | $SED -e "s/\(..\)/s\/[\1]\/[\1]\/g;/g"` + fi + ;; + esac +fi + +file_magic_cmd=$lt_cv_file_magic_cmd +deplibs_check_method=$lt_cv_deplibs_check_method +test -z "$deplibs_check_method" && deplibs_check_method=unknown + + + + + + + + + + + + + + + + + + + + + + +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}dlltool", so it can be a program name with args. +set dummy ${ac_tool_prefix}dlltool; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_DLLTOOL+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$DLLTOOL"; then + ac_cv_prog_DLLTOOL="$DLLTOOL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_DLLTOOL="${ac_tool_prefix}dlltool" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +DLLTOOL=$ac_cv_prog_DLLTOOL +if test -n "$DLLTOOL"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DLLTOOL" >&5 +$as_echo "$DLLTOOL" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_DLLTOOL"; then + ac_ct_DLLTOOL=$DLLTOOL + # Extract the first word of "dlltool", so it can be a program name with args. 
+set dummy dlltool; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_DLLTOOL+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_DLLTOOL"; then + ac_cv_prog_ac_ct_DLLTOOL="$ac_ct_DLLTOOL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_DLLTOOL="dlltool" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_DLLTOOL=$ac_cv_prog_ac_ct_DLLTOOL +if test -n "$ac_ct_DLLTOOL"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DLLTOOL" >&5 +$as_echo "$ac_ct_DLLTOOL" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_DLLTOOL" = x; then + DLLTOOL="false" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + DLLTOOL=$ac_ct_DLLTOOL + fi +else + DLLTOOL="$ac_cv_prog_DLLTOOL" +fi + +test -z "$DLLTOOL" && DLLTOOL=dlltool + + + + + + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to associate runtime and link libraries" >&5 +$as_echo_n "checking how to associate runtime and link libraries... 
" >&6; } +if ${lt_cv_sharedlib_from_linklib_cmd+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_sharedlib_from_linklib_cmd='unknown' + +case $host_os in +cygwin* | mingw* | pw32* | cegcc*) + # two different shell functions defined in ltmain.sh + # decide which to use based on capabilities of $DLLTOOL + case `$DLLTOOL --help 2>&1` in + *--identify-strict*) + lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib + ;; + *) + lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib_fallback + ;; + esac + ;; +*) + # fallback: assume linklib IS sharedlib + lt_cv_sharedlib_from_linklib_cmd="$ECHO" + ;; +esac + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sharedlib_from_linklib_cmd" >&5 +$as_echo "$lt_cv_sharedlib_from_linklib_cmd" >&6; } +sharedlib_from_linklib_cmd=$lt_cv_sharedlib_from_linklib_cmd +test -z "$sharedlib_from_linklib_cmd" && sharedlib_from_linklib_cmd=$ECHO + + + + + + + +if test -n "$ac_tool_prefix"; then + for ac_prog in ar + do + # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. +set dummy $ac_tool_prefix$ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_AR+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$AR"; then + ac_cv_prog_AR="$AR" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_AR="$ac_tool_prefix$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +AR=$ac_cv_prog_AR +if test -n "$AR"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AR" >&5 +$as_echo "$AR" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$AR" && break + done +fi +if test -z "$AR"; then + ac_ct_AR=$AR + for ac_prog in ar +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_AR+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_AR"; then + ac_cv_prog_ac_ct_AR="$ac_ct_AR" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_AR="$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_AR=$ac_cv_prog_ac_ct_AR +if test -n "$ac_ct_AR"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_AR" >&5 +$as_echo "$ac_ct_AR" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$ac_ct_AR" && break +done + + if test "x$ac_ct_AR" = x; then + AR="false" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + AR=$ac_ct_AR + fi +fi + +: ${AR=ar} +: ${AR_FLAGS=cru} + + + + + + + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for archiver @FILE support" >&5 +$as_echo_n "checking for archiver @FILE support... " >&6; } +if ${lt_cv_ar_at_file+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_ar_at_file=no + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + echo conftest.$ac_objext > conftest.lst + lt_ar_try='$AR $AR_FLAGS libconftest.a @conftest.lst >&5' + { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 + (eval $lt_ar_try) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } + if test "$ac_status" -eq 0; then + # Ensure the archiver fails upon bogus file names. + rm -f conftest.$ac_objext libconftest.a + { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 + (eval $lt_ar_try) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 + test $ac_status = 0; } + if test "$ac_status" -ne 0; then + lt_cv_ar_at_file=@ + fi + fi + rm -f conftest.* libconftest.a + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ar_at_file" >&5 +$as_echo "$lt_cv_ar_at_file" >&6; } + +if test "x$lt_cv_ar_at_file" = xno; then + archiver_list_spec= +else + archiver_list_spec=$lt_cv_ar_at_file +fi + + + + + + + +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args. +set dummy ${ac_tool_prefix}strip; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_STRIP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$STRIP"; then + ac_cv_prog_STRIP="$STRIP" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_STRIP="${ac_tool_prefix}strip" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +STRIP=$ac_cv_prog_STRIP +if test -n "$STRIP"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $STRIP" >&5 +$as_echo "$STRIP" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_STRIP"; then + ac_ct_STRIP=$STRIP + # Extract the first word of "strip", so it can be a program name with args. +set dummy strip; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... 
" >&6; } +if ${ac_cv_prog_ac_ct_STRIP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_STRIP"; then + ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_STRIP="strip" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP +if test -n "$ac_ct_STRIP"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_STRIP" >&5 +$as_echo "$ac_ct_STRIP" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_STRIP" = x; then + STRIP=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + STRIP=$ac_ct_STRIP + fi +else + STRIP="$ac_cv_prog_STRIP" +fi + +test -z "$STRIP" && STRIP=: + + + + + + +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}ranlib", so it can be a program name with args. +set dummy ${ac_tool_prefix}ranlib; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_RANLIB+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$RANLIB"; then + ac_cv_prog_RANLIB="$RANLIB" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_RANLIB="${ac_tool_prefix}ranlib" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +RANLIB=$ac_cv_prog_RANLIB +if test -n "$RANLIB"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $RANLIB" >&5 +$as_echo "$RANLIB" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_RANLIB"; then + ac_ct_RANLIB=$RANLIB + # Extract the first word of "ranlib", so it can be a program name with args. +set dummy ranlib; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_RANLIB+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_RANLIB"; then + ac_cv_prog_ac_ct_RANLIB="$ac_ct_RANLIB" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_RANLIB="ranlib" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_RANLIB=$ac_cv_prog_ac_ct_RANLIB +if test -n "$ac_ct_RANLIB"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_RANLIB" >&5 +$as_echo "$ac_ct_RANLIB" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_RANLIB" = x; then + RANLIB=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + RANLIB=$ac_ct_RANLIB + fi +else + RANLIB="$ac_cv_prog_RANLIB" +fi + +test -z "$RANLIB" && RANLIB=: + + + + + + +# Determine commands to create old-style static archives. +old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs' +old_postinstall_cmds='chmod 644 $oldlib' +old_postuninstall_cmds= + +if test -n "$RANLIB"; then + case $host_os in + openbsd*) + old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB -t \$tool_oldlib" + ;; + *) + old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB \$tool_oldlib" + ;; + esac + old_archive_cmds="$old_archive_cmds~\$RANLIB \$tool_oldlib" +fi + +case $host_os in + darwin*) + lock_old_archive_extraction=yes ;; + *) + lock_old_archive_extraction=no ;; +esac + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +# If no C compiler was specified, use CC. +LTCC=${LTCC-"$CC"} + +# If no C compiler flags were specified, use CFLAGS. +LTCFLAGS=${LTCFLAGS-"$CFLAGS"} + +# Allow CC to be a program name with arguments. +compiler=$CC + + +# Check for command to grab the raw symbol name followed by C symbol from nm. 
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking command to parse $NM output from $compiler object" >&5 +$as_echo_n "checking command to parse $NM output from $compiler object... " >&6; } +if ${lt_cv_sys_global_symbol_pipe+:} false; then : + $as_echo_n "(cached) " >&6 +else + +# These are sane defaults that work on at least a few old systems. +# [They come from Ultrix. What could be older than Ultrix?!! ;)] + +# Character class describing NM global symbol codes. +symcode='[BCDEGRST]' + +# Regexp to match symbols that can be accessed directly from C. +sympat='\([_A-Za-z][_A-Za-z0-9]*\)' + +# Define system-specific variables. +case $host_os in +aix*) + symcode='[BCDT]' + ;; +cygwin* | mingw* | pw32* | cegcc*) + symcode='[ABCDGISTW]' + ;; +hpux*) + if test "$host_cpu" = ia64; then + symcode='[ABCDEGRST]' + fi + ;; +irix* | nonstopux*) + symcode='[BCDEGRST]' + ;; +osf*) + symcode='[BCDEGQRST]' + ;; +solaris*) + symcode='[BDRT]' + ;; +sco3.2v5*) + symcode='[DT]' + ;; +sysv4.2uw2*) + symcode='[DT]' + ;; +sysv5* | sco5v6* | unixware* | OpenUNIX*) + symcode='[ABDT]' + ;; +sysv4) + symcode='[DFNSTU]' + ;; +esac + +# If we're using GNU nm, then use its standard symbol codes. +case `$NM -V 2>&1` in +*GNU* | *'with BFD'*) + symcode='[ABCDGIRSTW]' ;; +esac + +# Transform an extracted symbol line into a proper C declaration. +# Some systems (esp. on ia64) link data and code symbols differently, +# so use this general approach. 
+lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'" + +# Transform an extracted symbol line into symbol name and symbol address +lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([^ ]*\)[ ]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"\2\", (void *) \&\2},/p'" +lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([^ ]*\)[ ]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \(lib[^ ]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"lib\2\", (void *) \&\2},/p'" + +# Handle CRLF in mingw tool chain +opt_cr= +case $build_os in +mingw*) + opt_cr=`$ECHO 'x\{0,1\}' | tr x '\015'` # option cr in regexp + ;; +esac + +# Try without a prefix underscore, then with it. +for ac_symprfx in "" "_"; do + + # Transform symcode, sympat, and symprfx into a raw symbol and a C symbol. + symxfrm="\\1 $ac_symprfx\\2 \\2" + + # Write the raw and C identifiers. + if test "$lt_cv_nm_interface" = "MS dumpbin"; then + # Fake it for dumpbin and say T for any non-static function + # and D for any global variable. + # Also find C++ and __fastcall symbols from MSVC++, + # which start with @ or ?. + lt_cv_sys_global_symbol_pipe="$AWK '"\ +" {last_section=section; section=\$ 3};"\ +" /^COFF SYMBOL TABLE/{for(i in hide) delete hide[i]};"\ +" /Section length .*#relocs.*(pick any)/{hide[last_section]=1};"\ +" \$ 0!~/External *\|/{next};"\ +" / 0+ UNDEF /{next}; / UNDEF \([^|]\)*()/{next};"\ +" {if(hide[section]) next};"\ +" {f=0}; \$ 0~/\(\).*\|/{f=1}; {printf f ? 
\"T \" : \"D \"};"\ +" {split(\$ 0, a, /\||\r/); split(a[2], s)};"\ +" s[1]~/^[@?]/{print s[1], s[1]; next};"\ +" s[1]~prfx {split(s[1],t,\"@\"); print t[1], substr(t[1],length(prfx))}"\ +" ' prfx=^$ac_symprfx" + else + lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[ ]\($symcode$symcode*\)[ ][ ]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'" + fi + lt_cv_sys_global_symbol_pipe="$lt_cv_sys_global_symbol_pipe | sed '/ __gnu_lto/d'" + + # Check to see that the pipe works correctly. + pipe_works=no + + rm -f conftest* + cat > conftest.$ac_ext <<_LT_EOF +#ifdef __cplusplus +extern "C" { +#endif +char nm_test_var; +void nm_test_func(void); +void nm_test_func(void){} +#ifdef __cplusplus +} +#endif +int main(){nm_test_var='a';nm_test_func();return(0);} +_LT_EOF + + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + # Now try to grab the symbols. + nlist=conftest.nm + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$NM conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist\""; } >&5 + (eval $NM conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && test -s "$nlist"; then + # Try sorting and uniquifying the output. + if sort "$nlist" | uniq > "$nlist"T; then + mv -f "$nlist"T "$nlist" + else + rm -f "$nlist"T + fi + + # Make sure that we snagged all the symbols we need. + if $GREP ' nm_test_var$' "$nlist" >/dev/null; then + if $GREP ' nm_test_func$' "$nlist" >/dev/null; then + cat <<_LT_EOF > conftest.$ac_ext +/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. 
*/ +#if defined(_WIN32) || defined(__CYGWIN__) || defined(_WIN32_WCE) +/* DATA imports from DLLs on WIN32 con't be const, because runtime + relocations are performed -- see ld's documentation on pseudo-relocs. */ +# define LT_DLSYM_CONST +#elif defined(__osf__) +/* This system does not cope well with relocations in const data. */ +# define LT_DLSYM_CONST +#else +# define LT_DLSYM_CONST const +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +_LT_EOF + # Now generate the symbol file. + eval "$lt_cv_sys_global_symbol_to_cdecl"' < "$nlist" | $GREP -v main >> conftest.$ac_ext' + + cat <<_LT_EOF >> conftest.$ac_ext + +/* The mapping between symbol names and symbols. */ +LT_DLSYM_CONST struct { + const char *name; + void *address; +} +lt__PROGRAM__LTX_preloaded_symbols[] = +{ + { "@PROGRAM@", (void *) 0 }, +_LT_EOF + $SED "s/^$symcode$symcode* \(.*\) \(.*\)$/ {\"\2\", (void *) \&\2},/" < "$nlist" | $GREP -v main >> conftest.$ac_ext + cat <<\_LT_EOF >> conftest.$ac_ext + {0, (void *) 0} +}; + +/* This works around a problem in FreeBSD linker */ +#ifdef FREEBSD_WORKAROUND +static const void *lt_preloaded_setup() { + return lt__PROGRAM__LTX_preloaded_symbols; +} +#endif + +#ifdef __cplusplus +} +#endif +_LT_EOF + # Now try linking the two files. + mv conftest.$ac_objext conftstm.$ac_objext + lt_globsym_save_LIBS=$LIBS + lt_globsym_save_CFLAGS=$CFLAGS + LIBS="conftstm.$ac_objext" + CFLAGS="$CFLAGS$lt_prog_compiler_no_builtin_flag" + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 + (eval $ac_link) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 + test $ac_status = 0; } && test -s conftest${ac_exeext}; then + pipe_works=yes + fi + LIBS=$lt_globsym_save_LIBS + CFLAGS=$lt_globsym_save_CFLAGS + else + echo "cannot find nm_test_func in $nlist" >&5 + fi + else + echo "cannot find nm_test_var in $nlist" >&5 + fi + else + echo "cannot run $lt_cv_sys_global_symbol_pipe" >&5 + fi + else + echo "$progname: failed program was:" >&5 + cat conftest.$ac_ext >&5 + fi + rm -rf conftest* conftst* + + # Do not use the global_symbol_pipe unless it works. + if test "$pipe_works" = yes; then + break + else + lt_cv_sys_global_symbol_pipe= + fi +done + +fi + +if test -z "$lt_cv_sys_global_symbol_pipe"; then + lt_cv_sys_global_symbol_to_cdecl= +fi +if test -z "$lt_cv_sys_global_symbol_pipe$lt_cv_sys_global_symbol_to_cdecl"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: failed" >&5 +$as_echo "failed" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: ok" >&5 +$as_echo "ok" >&6; } +fi + +# Response file support. +if test "$lt_cv_nm_interface" = "MS dumpbin"; then + nm_file_list_spec='@' +elif $NM --help 2>/dev/null | grep '[@]FILE' >/dev/null; then + nm_file_list_spec='@' +fi + + + + + + + + + + + + + + + + + + + + + + + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for sysroot" >&5 +$as_echo_n "checking for sysroot... " >&6; } + +# Check whether --with-sysroot was given. +if test "${with_sysroot+set}" = set; then : + withval=$with_sysroot; +else + with_sysroot=no +fi + + +lt_sysroot= +case ${with_sysroot} in #( + yes) + if test "$GCC" = yes; then + lt_sysroot=`$CC --print-sysroot 2>/dev/null` + fi + ;; #( + /*) + lt_sysroot=`echo "$with_sysroot" | sed -e "$sed_quote_subst"` + ;; #( + no|'') + ;; #( + *) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${with_sysroot}" >&5 +$as_echo "${with_sysroot}" >&6; } + as_fn_error $? "The sysroot must be an absolute path." 
"$LINENO" 5 + ;; +esac + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${lt_sysroot:-no}" >&5 +$as_echo "${lt_sysroot:-no}" >&6; } + + + + + +# Check whether --enable-libtool-lock was given. +if test "${enable_libtool_lock+set}" = set; then : + enableval=$enable_libtool_lock; +fi + +test "x$enable_libtool_lock" != xno && enable_libtool_lock=yes + +# Some flags need to be propagated to the compiler or linker for good +# libtool support. +case $host in +ia64-*-hpux*) + # Find out which ABI we are using. + echo 'int i;' > conftest.$ac_ext + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + case `/usr/bin/file conftest.$ac_objext` in + *ELF-32*) + HPUX_IA64_MODE="32" + ;; + *ELF-64*) + HPUX_IA64_MODE="64" + ;; + esac + fi + rm -rf conftest* + ;; +*-*-irix6*) + # Find out which ABI we are using. + echo '#line '$LINENO' "configure"' > conftest.$ac_ext + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + if test "$lt_cv_prog_gnu_ld" = yes; then + case `/usr/bin/file conftest.$ac_objext` in + *32-bit*) + LD="${LD-ld} -melf32bsmip" + ;; + *N32*) + LD="${LD-ld} -melf32bmipn32" + ;; + *64-bit*) + LD="${LD-ld} -melf64bmip" + ;; + esac + else + case `/usr/bin/file conftest.$ac_objext` in + *32-bit*) + LD="${LD-ld} -32" + ;; + *N32*) + LD="${LD-ld} -n32" + ;; + *64-bit*) + LD="${LD-ld} -64" + ;; + esac + fi + fi + rm -rf conftest* + ;; + +x86_64-*kfreebsd*-gnu|x86_64-*linux*|powerpc*-*linux*| \ +s390*-*linux*|s390*-*tpf*|sparc*-*linux*) + # Find out which ABI we are using. + echo 'int i;' > conftest.$ac_ext + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? 
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + case `/usr/bin/file conftest.o` in + *32-bit*) + case $host in + x86_64-*kfreebsd*-gnu) + LD="${LD-ld} -m elf_i386_fbsd" + ;; + x86_64-*linux*) + LD="${LD-ld} -m elf_i386" + ;; + powerpc64le-*linux*) + LD="${LD-ld} -m elf32lppclinux" + ;; + powerpc64-*linux*) + LD="${LD-ld} -m elf32ppclinux" + ;; + s390x-*linux*) + LD="${LD-ld} -m elf_s390" + ;; + sparc64-*linux*) + LD="${LD-ld} -m elf32_sparc" + ;; + esac + ;; + *64-bit*) + case $host in + x86_64-*kfreebsd*-gnu) + LD="${LD-ld} -m elf_x86_64_fbsd" + ;; + x86_64-*linux*) + LD="${LD-ld} -m elf_x86_64" + ;; + powerpcle-*linux*) + LD="${LD-ld} -m elf64lppc" + ;; + powerpc-*linux*) + LD="${LD-ld} -m elf64ppc" + ;; + s390*-*linux*|s390*-*tpf*) + LD="${LD-ld} -m elf64_s390" + ;; + sparc*-*linux*) + LD="${LD-ld} -m elf64_sparc" + ;; + esac + ;; + esac + fi + rm -rf conftest* + ;; + +*-*-sco3.2v5*) + # On SCO OpenServer 5, we need -belf to get full-featured binaries. + SAVE_CFLAGS="$CFLAGS" + CFLAGS="$CFLAGS -belf" + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler needs -belf" >&5 +$as_echo_n "checking whether the C compiler needs -belf... " >&6; } +if ${lt_cv_cc_needs_belf+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + lt_cv_cc_needs_belf=yes +else + lt_cv_cc_needs_belf=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_cc_needs_belf" >&5 +$as_echo "$lt_cv_cc_needs_belf" >&6; } + if test x"$lt_cv_cc_needs_belf" != x"yes"; then + # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf + CFLAGS="$SAVE_CFLAGS" + fi + ;; +*-*solaris*) + # Find out which ABI we are using. + echo 'int i;' > conftest.$ac_ext + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + case `/usr/bin/file conftest.o` in + *64-bit*) + case $lt_cv_prog_gnu_ld in + yes*) + case $host in + i?86-*-solaris*) + LD="${LD-ld} -m elf_x86_64" + ;; + sparc*-*-solaris*) + LD="${LD-ld} -m elf64_sparc" + ;; + esac + # GNU ld 2.21 introduced _sol2 emulations. Use them if available. + if ${LD-ld} -V | grep _sol2 >/dev/null 2>&1; then + LD="${LD-ld}_sol2" + fi + ;; + *) + if ${LD-ld} -64 -r -o conftest2.o conftest.o >/dev/null 2>&1; then + LD="${LD-ld} -64" + fi + ;; + esac + ;; + esac + fi + rm -rf conftest* + ;; +esac + +need_locks="$enable_libtool_lock" + +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}mt", so it can be a program name with args. +set dummy ${ac_tool_prefix}mt; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... 
" >&6; } +if ${ac_cv_prog_MANIFEST_TOOL+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$MANIFEST_TOOL"; then + ac_cv_prog_MANIFEST_TOOL="$MANIFEST_TOOL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_MANIFEST_TOOL="${ac_tool_prefix}mt" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +MANIFEST_TOOL=$ac_cv_prog_MANIFEST_TOOL +if test -n "$MANIFEST_TOOL"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MANIFEST_TOOL" >&5 +$as_echo "$MANIFEST_TOOL" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_MANIFEST_TOOL"; then + ac_ct_MANIFEST_TOOL=$MANIFEST_TOOL + # Extract the first word of "mt", so it can be a program name with args. +set dummy mt; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_MANIFEST_TOOL+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_MANIFEST_TOOL"; then + ac_cv_prog_ac_ct_MANIFEST_TOOL="$ac_ct_MANIFEST_TOOL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_MANIFEST_TOOL="mt" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_MANIFEST_TOOL=$ac_cv_prog_ac_ct_MANIFEST_TOOL +if test -n "$ac_ct_MANIFEST_TOOL"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_MANIFEST_TOOL" >&5 +$as_echo "$ac_ct_MANIFEST_TOOL" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_MANIFEST_TOOL" = x; then + MANIFEST_TOOL=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + MANIFEST_TOOL=$ac_ct_MANIFEST_TOOL + fi +else + MANIFEST_TOOL="$ac_cv_prog_MANIFEST_TOOL" +fi + +test -z "$MANIFEST_TOOL" && MANIFEST_TOOL=mt +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $MANIFEST_TOOL is a manifest tool" >&5 +$as_echo_n "checking if $MANIFEST_TOOL is a manifest tool... " >&6; } +if ${lt_cv_path_mainfest_tool+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_path_mainfest_tool=no + echo "$as_me:$LINENO: $MANIFEST_TOOL '-?'" >&5 + $MANIFEST_TOOL '-?' 2>conftest.err > conftest.out + cat conftest.err >&5 + if $GREP 'Manifest Tool' conftest.out > /dev/null; then + lt_cv_path_mainfest_tool=yes + fi + rm -f conftest* +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_mainfest_tool" >&5 +$as_echo "$lt_cv_path_mainfest_tool" >&6; } +if test "x$lt_cv_path_mainfest_tool" != xyes; then + MANIFEST_TOOL=: +fi + + + + + + + case $host_os in + rhapsody* | darwin*) + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}dsymutil", so it can be a program name with args. 
+set dummy ${ac_tool_prefix}dsymutil; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_DSYMUTIL+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$DSYMUTIL"; then + ac_cv_prog_DSYMUTIL="$DSYMUTIL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_DSYMUTIL="${ac_tool_prefix}dsymutil" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +DSYMUTIL=$ac_cv_prog_DSYMUTIL +if test -n "$DSYMUTIL"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DSYMUTIL" >&5 +$as_echo "$DSYMUTIL" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_DSYMUTIL"; then + ac_ct_DSYMUTIL=$DSYMUTIL + # Extract the first word of "dsymutil", so it can be a program name with args. +set dummy dsymutil; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_DSYMUTIL+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_DSYMUTIL"; then + ac_cv_prog_ac_ct_DSYMUTIL="$ac_ct_DSYMUTIL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_DSYMUTIL="dsymutil" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_DSYMUTIL=$ac_cv_prog_ac_ct_DSYMUTIL +if test -n "$ac_ct_DSYMUTIL"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DSYMUTIL" >&5 +$as_echo "$ac_ct_DSYMUTIL" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_DSYMUTIL" = x; then + DSYMUTIL=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + DSYMUTIL=$ac_ct_DSYMUTIL + fi +else + DSYMUTIL="$ac_cv_prog_DSYMUTIL" +fi + + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}nmedit", so it can be a program name with args. +set dummy ${ac_tool_prefix}nmedit; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_NMEDIT+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$NMEDIT"; then + ac_cv_prog_NMEDIT="$NMEDIT" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_NMEDIT="${ac_tool_prefix}nmedit" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +NMEDIT=$ac_cv_prog_NMEDIT +if test -n "$NMEDIT"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $NMEDIT" >&5 +$as_echo "$NMEDIT" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_NMEDIT"; then + ac_ct_NMEDIT=$NMEDIT + # Extract the first word of "nmedit", so it can be a program name with args. +set dummy nmedit; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_NMEDIT+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_NMEDIT"; then + ac_cv_prog_ac_ct_NMEDIT="$ac_ct_NMEDIT" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_NMEDIT="nmedit" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_NMEDIT=$ac_cv_prog_ac_ct_NMEDIT +if test -n "$ac_ct_NMEDIT"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_NMEDIT" >&5 +$as_echo "$ac_ct_NMEDIT" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_NMEDIT" = x; then + NMEDIT=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + NMEDIT=$ac_ct_NMEDIT + fi +else + NMEDIT="$ac_cv_prog_NMEDIT" +fi + + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}lipo", so it can be a program name with args. +set dummy ${ac_tool_prefix}lipo; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_LIPO+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$LIPO"; then + ac_cv_prog_LIPO="$LIPO" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_LIPO="${ac_tool_prefix}lipo" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +LIPO=$ac_cv_prog_LIPO +if test -n "$LIPO"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LIPO" >&5 +$as_echo "$LIPO" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_LIPO"; then + ac_ct_LIPO=$LIPO + # Extract the first word of "lipo", so it can be a program name with args. +set dummy lipo; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_LIPO+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_LIPO"; then + ac_cv_prog_ac_ct_LIPO="$ac_ct_LIPO" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_LIPO="lipo" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_LIPO=$ac_cv_prog_ac_ct_LIPO +if test -n "$ac_ct_LIPO"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_LIPO" >&5 +$as_echo "$ac_ct_LIPO" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_LIPO" = x; then + LIPO=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + LIPO=$ac_ct_LIPO + fi +else + LIPO="$ac_cv_prog_LIPO" +fi + + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}otool", so it can be a program name with args. +set dummy ${ac_tool_prefix}otool; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_OTOOL+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$OTOOL"; then + ac_cv_prog_OTOOL="$OTOOL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_OTOOL="${ac_tool_prefix}otool" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +OTOOL=$ac_cv_prog_OTOOL +if test -n "$OTOOL"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OTOOL" >&5 +$as_echo "$OTOOL" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_OTOOL"; then + ac_ct_OTOOL=$OTOOL + # Extract the first word of "otool", so it can be a program name with args. +set dummy otool; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_OTOOL+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_OTOOL"; then + ac_cv_prog_ac_ct_OTOOL="$ac_ct_OTOOL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_OTOOL="otool" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_OTOOL=$ac_cv_prog_ac_ct_OTOOL +if test -n "$ac_ct_OTOOL"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OTOOL" >&5 +$as_echo "$ac_ct_OTOOL" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_OTOOL" = x; then + OTOOL=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + OTOOL=$ac_ct_OTOOL + fi +else + OTOOL="$ac_cv_prog_OTOOL" +fi + + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}otool64", so it can be a program name with args. +set dummy ${ac_tool_prefix}otool64; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_OTOOL64+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$OTOOL64"; then + ac_cv_prog_OTOOL64="$OTOOL64" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_OTOOL64="${ac_tool_prefix}otool64" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +OTOOL64=$ac_cv_prog_OTOOL64 +if test -n "$OTOOL64"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OTOOL64" >&5 +$as_echo "$OTOOL64" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_OTOOL64"; then + ac_ct_OTOOL64=$OTOOL64 + # Extract the first word of "otool64", so it can be a program name with args. +set dummy otool64; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_OTOOL64+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_OTOOL64"; then + ac_cv_prog_ac_ct_OTOOL64="$ac_ct_OTOOL64" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_OTOOL64="otool64" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_OTOOL64=$ac_cv_prog_ac_ct_OTOOL64 +if test -n "$ac_ct_OTOOL64"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OTOOL64" >&5 +$as_echo "$ac_ct_OTOOL64" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_OTOOL64" = x; then + OTOOL64=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + OTOOL64=$ac_ct_OTOOL64 + fi +else + OTOOL64="$ac_cv_prog_OTOOL64" +fi + + + + + + + + + + + + + + + + + + + + + + + + + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for -single_module linker flag" >&5 +$as_echo_n "checking for -single_module linker flag... " >&6; } +if ${lt_cv_apple_cc_single_mod+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_apple_cc_single_mod=no + if test -z "${LT_MULTI_MODULE}"; then + # By default we will add the -single_module flag. You can override + # by either setting the environment variable LT_MULTI_MODULE + # non-empty at configure time, or by adding -multi_module to the + # link flags. + rm -rf libconftest.dylib* + echo "int foo(void){return 1;}" > conftest.c + echo "$LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ +-dynamiclib -Wl,-single_module conftest.c" >&5 + $LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ + -dynamiclib -Wl,-single_module conftest.c 2>conftest.err + _lt_result=$? 
+ # If there is a non-empty error log, and "single_module" + # appears in it, assume the flag caused a linker warning + if test -s conftest.err && $GREP single_module conftest.err; then + cat conftest.err >&5 + # Otherwise, if the output was created with a 0 exit code from + # the compiler, it worked. + elif test -f libconftest.dylib && test $_lt_result -eq 0; then + lt_cv_apple_cc_single_mod=yes + else + cat conftest.err >&5 + fi + rm -rf libconftest.dylib* + rm -f conftest.* + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_apple_cc_single_mod" >&5 +$as_echo "$lt_cv_apple_cc_single_mod" >&6; } + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for -exported_symbols_list linker flag" >&5 +$as_echo_n "checking for -exported_symbols_list linker flag... " >&6; } +if ${lt_cv_ld_exported_symbols_list+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_ld_exported_symbols_list=no + save_LDFLAGS=$LDFLAGS + echo "_main" > conftest.sym + LDFLAGS="$LDFLAGS -Wl,-exported_symbols_list,conftest.sym" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + lt_cv_ld_exported_symbols_list=yes +else + lt_cv_ld_exported_symbols_list=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + LDFLAGS="$save_LDFLAGS" + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_exported_symbols_list" >&5 +$as_echo "$lt_cv_ld_exported_symbols_list" >&6; } + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for -force_load linker flag" >&5 +$as_echo_n "checking for -force_load linker flag... 
" >&6; } +if ${lt_cv_ld_force_load+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_ld_force_load=no + cat > conftest.c << _LT_EOF +int forced_loaded() { return 2;} +_LT_EOF + echo "$LTCC $LTCFLAGS -c -o conftest.o conftest.c" >&5 + $LTCC $LTCFLAGS -c -o conftest.o conftest.c 2>&5 + echo "$AR cru libconftest.a conftest.o" >&5 + $AR cru libconftest.a conftest.o 2>&5 + echo "$RANLIB libconftest.a" >&5 + $RANLIB libconftest.a 2>&5 + cat > conftest.c << _LT_EOF +int main() { return 0;} +_LT_EOF + echo "$LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a" >&5 + $LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a 2>conftest.err + _lt_result=$? + if test -s conftest.err && $GREP force_load conftest.err; then + cat conftest.err >&5 + elif test -f conftest && test $_lt_result -eq 0 && $GREP forced_load conftest >/dev/null 2>&1 ; then + lt_cv_ld_force_load=yes + else + cat conftest.err >&5 + fi + rm -f conftest.err libconftest.a conftest conftest.c + rm -rf conftest.dSYM + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_force_load" >&5 +$as_echo "$lt_cv_ld_force_load" >&6; } + case $host_os in + rhapsody* | darwin1.[012]) + _lt_dar_allow_undefined='${wl}-undefined ${wl}suppress' ;; + darwin1.*) + _lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' ;; + darwin*) # darwin 5.x on + # if running on 10.5 or later, the deployment target defaults + # to the OS version, if on x86, and 10.4, the deployment + # target defaults to 10.4. Don't you love it? 
+ case ${MACOSX_DEPLOYMENT_TARGET-10.0},$host in + 10.0,*86*-darwin8*|10.0,*-darwin[91]*) + _lt_dar_allow_undefined='${wl}-undefined ${wl}dynamic_lookup' ;; + 10.[012]*) + _lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' ;; + 10.*) + _lt_dar_allow_undefined='${wl}-undefined ${wl}dynamic_lookup' ;; + esac + ;; + esac + if test "$lt_cv_apple_cc_single_mod" = "yes"; then + _lt_dar_single_mod='$single_module' + fi + if test "$lt_cv_ld_exported_symbols_list" = "yes"; then + _lt_dar_export_syms=' ${wl}-exported_symbols_list,$output_objdir/${libname}-symbols.expsym' + else + _lt_dar_export_syms='~$NMEDIT -s $output_objdir/${libname}-symbols.expsym ${lib}' + fi + if test "$DSYMUTIL" != ":" && test "$lt_cv_ld_force_load" = "no"; then + _lt_dsymutil='~$DSYMUTIL $lib || :' + else + _lt_dsymutil= + fi + ;; + esac + +for ac_header in dlfcn.h +do : + ac_fn_c_check_header_compile "$LINENO" "dlfcn.h" "ac_cv_header_dlfcn_h" "$ac_includes_default +" +if test "x$ac_cv_header_dlfcn_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_DLFCN_H 1 +_ACEOF + +fi + +done + + + +func_stripname_cnf () +{ + case ${2} in + .*) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%\\\\${2}\$%%"`;; + *) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%${2}\$%%"`;; + esac +} # func_stripname_cnf + + + + + +# Set options + + + + enable_dlopen=no + + + enable_win32_dll=no + + + # Check whether --enable-shared was given. +if test "${enable_shared+set}" = set; then : + enableval=$enable_shared; p=${PACKAGE-default} + case $enableval in + yes) enable_shared=yes ;; + no) enable_shared=no ;; + *) + enable_shared=no + # Look at the argument we got. We use all the common list separators. 
+ lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for pkg in $enableval; do + IFS="$lt_save_ifs" + if test "X$pkg" = "X$p"; then + enable_shared=yes + fi + done + IFS="$lt_save_ifs" + ;; + esac +else + enable_shared=yes +fi + + + + + + + + + + + +# Check whether --with-pic was given. +if test "${with_pic+set}" = set; then : + withval=$with_pic; lt_p=${PACKAGE-default} + case $withval in + yes|no) pic_mode=$withval ;; + *) + pic_mode=default + # Look at the argument we got. We use all the common list separators. + lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for lt_pkg in $withval; do + IFS="$lt_save_ifs" + if test "X$lt_pkg" = "X$lt_p"; then + pic_mode=yes + fi + done + IFS="$lt_save_ifs" + ;; + esac +else + pic_mode=default +fi + + +test -z "$pic_mode" && pic_mode=default + + + + + + + + # Check whether --enable-fast-install was given. +if test "${enable_fast_install+set}" = set; then : + enableval=$enable_fast_install; p=${PACKAGE-default} + case $enableval in + yes) enable_fast_install=yes ;; + no) enable_fast_install=no ;; + *) + enable_fast_install=no + # Look at the argument we got. We use all the common list separators. + lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for pkg in $enableval; do + IFS="$lt_save_ifs" + if test "X$pkg" = "X$p"; then + enable_fast_install=yes + fi + done + IFS="$lt_save_ifs" + ;; + esac +else + enable_fast_install=yes +fi + + + + + + + + + + + +# This can be used to rebuild libtool when needed +LIBTOOL_DEPS="$ltmain" + +# Always use our own libtool. +LIBTOOL='$(SHELL) $(top_builddir)/libtool' + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +test -z "$LN_S" && LN_S="ln -s" + + + + + + + + + + + + + + +if test -n "${ZSH_VERSION+set}" ; then + setopt NO_GLOB_SUBST +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for objdir" >&5 +$as_echo_n "checking for objdir... 
" >&6; } +if ${lt_cv_objdir+:} false; then : + $as_echo_n "(cached) " >&6 +else + rm -f .libs 2>/dev/null +mkdir .libs 2>/dev/null +if test -d .libs; then + lt_cv_objdir=.libs +else + # MS-DOS does not allow filenames that begin with a dot. + lt_cv_objdir=_libs +fi +rmdir .libs 2>/dev/null +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_objdir" >&5 +$as_echo "$lt_cv_objdir" >&6; } +objdir=$lt_cv_objdir + + + + + +cat >>confdefs.h <<_ACEOF +#define LT_OBJDIR "$lt_cv_objdir/" +_ACEOF + + + + +case $host_os in +aix3*) + # AIX sometimes has problems with the GCC collect2 program. For some + # reason, if we set the COLLECT_NAMES environment variable, the problems + # vanish in a puff of smoke. + if test "X${COLLECT_NAMES+set}" != Xset; then + COLLECT_NAMES= + export COLLECT_NAMES + fi + ;; +esac + +# Global variables: +ofile=libtool +can_build_shared=yes + +# All known linkers require a `.a' archive for static linking (except MSVC, +# which needs '.lib'). +libext=a + +with_gnu_ld="$lt_cv_prog_gnu_ld" + +old_CC="$CC" +old_CFLAGS="$CFLAGS" + +# Set sane defaults for various variables +test -z "$CC" && CC=cc +test -z "$LTCC" && LTCC=$CC +test -z "$LTCFLAGS" && LTCFLAGS=$CFLAGS +test -z "$LD" && LD=ld +test -z "$ac_objext" && ac_objext=o + +for cc_temp in $compiler""; do + case $cc_temp in + compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; + distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; + \-*) ;; + *) break;; + esac +done +cc_basename=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"` + + +# Only perform the check for file, if the check method requires it +test -z "$MAGIC_CMD" && MAGIC_CMD=file +case $deplibs_check_method in +file_magic*) + if test "$file_magic_cmd" = '$MAGIC_CMD'; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ${ac_tool_prefix}file" >&5 +$as_echo_n "checking for ${ac_tool_prefix}file... 
" >&6; } +if ${lt_cv_path_MAGIC_CMD+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $MAGIC_CMD in +[\\/*] | ?:[\\/]*) + lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a path. + ;; +*) + lt_save_MAGIC_CMD="$MAGIC_CMD" + lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR + ac_dummy="/usr/bin$PATH_SEPARATOR$PATH" + for ac_dir in $ac_dummy; do + IFS="$lt_save_ifs" + test -z "$ac_dir" && ac_dir=. + if test -f $ac_dir/${ac_tool_prefix}file; then + lt_cv_path_MAGIC_CMD="$ac_dir/${ac_tool_prefix}file" + if test -n "$file_magic_test_file"; then + case $deplibs_check_method in + "file_magic "*) + file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` + MAGIC_CMD="$lt_cv_path_MAGIC_CMD" + if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | + $EGREP "$file_magic_regex" > /dev/null; then + : + else + cat <<_LT_EOF 1>&2 + +*** Warning: the command libtool uses to detect shared libraries, +*** $file_magic_cmd, produces output that libtool cannot recognize. +*** The result is that libtool may fail to recognize shared libraries +*** as such. This will affect the creation of libtool libraries that +*** depend on shared libraries, but programs linked with such libtool +*** libraries will work regardless of this problem. Nevertheless, you +*** may want to report the problem to your system manager and/or to +*** bug-libtool@gnu.org + +_LT_EOF + fi ;; + esac + fi + break + fi + done + IFS="$lt_save_ifs" + MAGIC_CMD="$lt_save_MAGIC_CMD" + ;; +esac +fi + +MAGIC_CMD="$lt_cv_path_MAGIC_CMD" +if test -n "$MAGIC_CMD"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MAGIC_CMD" >&5 +$as_echo "$MAGIC_CMD" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + + + +if test -z "$lt_cv_path_MAGIC_CMD"; then + if test -n "$ac_tool_prefix"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for file" >&5 +$as_echo_n "checking for file... 
" >&6; } +if ${lt_cv_path_MAGIC_CMD+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $MAGIC_CMD in +[\\/*] | ?:[\\/]*) + lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a path. + ;; +*) + lt_save_MAGIC_CMD="$MAGIC_CMD" + lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR + ac_dummy="/usr/bin$PATH_SEPARATOR$PATH" + for ac_dir in $ac_dummy; do + IFS="$lt_save_ifs" + test -z "$ac_dir" && ac_dir=. + if test -f $ac_dir/file; then + lt_cv_path_MAGIC_CMD="$ac_dir/file" + if test -n "$file_magic_test_file"; then + case $deplibs_check_method in + "file_magic "*) + file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` + MAGIC_CMD="$lt_cv_path_MAGIC_CMD" + if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | + $EGREP "$file_magic_regex" > /dev/null; then + : + else + cat <<_LT_EOF 1>&2 + +*** Warning: the command libtool uses to detect shared libraries, +*** $file_magic_cmd, produces output that libtool cannot recognize. +*** The result is that libtool may fail to recognize shared libraries +*** as such. This will affect the creation of libtool libraries that +*** depend on shared libraries, but programs linked with such libtool +*** libraries will work regardless of this problem. 
Nevertheless, you +*** may want to report the problem to your system manager and/or to +*** bug-libtool@gnu.org + +_LT_EOF + fi ;; + esac + fi + break + fi + done + IFS="$lt_save_ifs" + MAGIC_CMD="$lt_save_MAGIC_CMD" + ;; +esac +fi + +MAGIC_CMD="$lt_cv_path_MAGIC_CMD" +if test -n "$MAGIC_CMD"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MAGIC_CMD" >&5 +$as_echo "$MAGIC_CMD" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + else + MAGIC_CMD=: + fi +fi + + fi + ;; +esac + +# Use C for the default configuration in the libtool script + +lt_save_CC="$CC" +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + +# Source file extension for C test sources. +ac_ext=c + +# Object file extension for compiled C test sources. +objext=o +objext=$objext + +# Code to be used in simple compile tests +lt_simple_compile_test_code="int some_variable = 0;" + +# Code to be used in simple link tests +lt_simple_link_test_code='int main(){return(0);}' + + + + + + + +# If no C compiler was specified, use CC. +LTCC=${LTCC-"$CC"} + +# If no C compiler flags were specified, use CFLAGS. +LTCFLAGS=${LTCFLAGS-"$CFLAGS"} + +# Allow CC to be a program name with arguments. +compiler=$CC + +# Save the default compiler, since it gets overwritten when the other +# tags are being tested, and _LT_TAGVAR(compiler, []) is a NOP. 
+compiler_DEFAULT=$CC + +# save warnings/boilerplate of simple test code +ac_outfile=conftest.$ac_objext +echo "$lt_simple_compile_test_code" >conftest.$ac_ext +eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_compiler_boilerplate=`cat conftest.err` +$RM conftest* + +ac_outfile=conftest.$ac_objext +echo "$lt_simple_link_test_code" >conftest.$ac_ext +eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_linker_boilerplate=`cat conftest.err` +$RM -r conftest* + + +## CAVEAT EMPTOR: +## There is no encapsulation within the following macros, do not change +## the running order or otherwise move them around unless you know exactly +## what you are doing... +if test -n "$compiler"; then + +lt_prog_compiler_no_builtin_flag= + +if test "$GCC" = yes; then + case $cc_basename in + nvcc*) + lt_prog_compiler_no_builtin_flag=' -Xcompiler -fno-builtin' ;; + *) + lt_prog_compiler_no_builtin_flag=' -fno-builtin' ;; + esac + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -fno-rtti -fno-exceptions" >&5 +$as_echo_n "checking if $compiler supports -fno-rtti -fno-exceptions... " >&6; } +if ${lt_cv_prog_compiler_rtti_exceptions+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_rtti_exceptions=no + ac_outfile=conftest.$ac_objext + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + lt_compiler_flag="-fno-rtti -fno-exceptions" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + # The option is referenced via a variable to avoid confusing sed. 
+ lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) + (eval "$lt_compile" 2>conftest.err) + ac_status=$? + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s "$ac_outfile"; then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings other than the usual output. + $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then + lt_cv_prog_compiler_rtti_exceptions=yes + fi + fi + $RM conftest* + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_rtti_exceptions" >&5 +$as_echo "$lt_cv_prog_compiler_rtti_exceptions" >&6; } + +if test x"$lt_cv_prog_compiler_rtti_exceptions" = xyes; then + lt_prog_compiler_no_builtin_flag="$lt_prog_compiler_no_builtin_flag -fno-rtti -fno-exceptions" +else + : +fi + +fi + + + + + + + lt_prog_compiler_wl= +lt_prog_compiler_pic= +lt_prog_compiler_static= + + + if test "$GCC" = yes; then + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_static='-static' + + case $host_os in + aix*) + # All AIX code is PIC. + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + lt_prog_compiler_static='-Bstatic' + fi + ;; + + amigaos*) + case $host_cpu in + powerpc) + # see comment about AmigaOS4 .so support + lt_prog_compiler_pic='-fPIC' + ;; + m68k) + # FIXME: we need at least 68020 code to build shared libraries, but + # adding the `-m68020' flag to GCC prevents building anything better, + # like `-m68040'. + lt_prog_compiler_pic='-m68020 -resident32 -malways-restore-a4' + ;; + esac + ;; + + beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) + # PIC is the default for these OSes. 
+ ;; + + mingw* | cygwin* | pw32* | os2* | cegcc*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + # Although the cygwin gcc ignores -fPIC, still need this for old-style + # (--disable-auto-import) libraries + lt_prog_compiler_pic='-DDLL_EXPORT' + ;; + + darwin* | rhapsody*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + lt_prog_compiler_pic='-fno-common' + ;; + + haiku*) + # PIC is the default for Haiku. + # The "-static" flag exists, but is broken. + lt_prog_compiler_static= + ;; + + hpux*) + # PIC is the default for 64-bit PA HP-UX, but not for 32-bit + # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag + # sets the default TLS model and affects inlining. + case $host_cpu in + hppa*64*) + # +Z the default + ;; + *) + lt_prog_compiler_pic='-fPIC' + ;; + esac + ;; + + interix[3-9]*) + # Interix 3.x gcc -fpic/-fPIC options generate broken code. + # Instead, we relocate shared libraries at runtime. + ;; + + msdosdjgpp*) + # Just because we use GCC doesn't mean we suddenly get shared libraries + # on systems that don't support them. + lt_prog_compiler_can_build_shared=no + enable_shared=no + ;; + + *nto* | *qnx*) + # QNX uses GNU C++, but need to define -shared option too, otherwise + # it will coredump. + lt_prog_compiler_pic='-fPIC -shared' + ;; + + sysv4*MP*) + if test -d /usr/nec; then + lt_prog_compiler_pic=-Kconform_pic + fi + ;; + + *) + lt_prog_compiler_pic='-fPIC' + ;; + esac + + case $cc_basename in + nvcc*) # Cuda Compiler Driver 2.2 + lt_prog_compiler_wl='-Xlinker ' + if test -n "$lt_prog_compiler_pic"; then + lt_prog_compiler_pic="-Xcompiler $lt_prog_compiler_pic" + fi + ;; + esac + else + # PORTME Check for flag to pass linker flags through the system compiler. 
+ case $host_os in + aix*) + lt_prog_compiler_wl='-Wl,' + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + lt_prog_compiler_static='-Bstatic' + else + lt_prog_compiler_static='-bnso -bI:/lib/syscalls.exp' + fi + ;; + + mingw* | cygwin* | pw32* | os2* | cegcc*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + lt_prog_compiler_pic='-DDLL_EXPORT' + ;; + + hpux9* | hpux10* | hpux11*) + lt_prog_compiler_wl='-Wl,' + # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but + # not for PA HP-UX. + case $host_cpu in + hppa*64*|ia64*) + # +Z the default + ;; + *) + lt_prog_compiler_pic='+Z' + ;; + esac + # Is there a better lt_prog_compiler_static that works with the bundled CC? + lt_prog_compiler_static='${wl}-a ${wl}archive' + ;; + + irix5* | irix6* | nonstopux*) + lt_prog_compiler_wl='-Wl,' + # PIC (with -KPIC) is the default. + lt_prog_compiler_static='-non_shared' + ;; + + linux* | k*bsd*-gnu | kopensolaris*-gnu) + case $cc_basename in + # old Intel for x86_64 which still supported -KPIC. + ecc*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-static' + ;; + # icc used to be incompatible with GCC. + # ICC 10 doesn't accept -KPIC any more. + icc* | ifort*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-fPIC' + lt_prog_compiler_static='-static' + ;; + # Lahey Fortran 8.1. 
+ lf95*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='--shared' + lt_prog_compiler_static='--static' + ;; + nagfor*) + # NAG Fortran compiler + lt_prog_compiler_wl='-Wl,-Wl,,' + lt_prog_compiler_pic='-PIC' + lt_prog_compiler_static='-Bstatic' + ;; + pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*) + # Portland Group compilers (*not* the Pentium gcc compiler, + # which looks to be a dead project) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-fpic' + lt_prog_compiler_static='-Bstatic' + ;; + ccc*) + lt_prog_compiler_wl='-Wl,' + # All Alpha code is PIC. + lt_prog_compiler_static='-non_shared' + ;; + xl* | bgxl* | bgf* | mpixl*) + # IBM XL C 8.0/Fortran 10.1, 11.1 on PPC and BlueGene + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-qpic' + lt_prog_compiler_static='-qstaticlink' + ;; + *) + case `$CC -V 2>&1 | sed 5q` in + *Sun\ Ceres\ Fortran* | *Sun*Fortran*\ [1-7].* | *Sun*Fortran*\ 8.[0-3]*) + # Sun Fortran 8.3 passes all unrecognized flags to the linker + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + lt_prog_compiler_wl='' + ;; + *Sun\ F* | *Sun*Fortran*) + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + lt_prog_compiler_wl='-Qoption ld ' + ;; + *Sun\ C*) + # Sun C 5.9 + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + lt_prog_compiler_wl='-Wl,' + ;; + *Intel*\ [CF]*Compiler*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-fPIC' + lt_prog_compiler_static='-static' + ;; + *Portland\ Group*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-fpic' + lt_prog_compiler_static='-Bstatic' + ;; + esac + ;; + esac + ;; + + newsos6) + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + ;; + + *nto* | *qnx*) + # QNX uses GNU C++, but need to define -shared option too, otherwise + # it will coredump. + lt_prog_compiler_pic='-fPIC -shared' + ;; + + osf3* | osf4* | osf5*) + lt_prog_compiler_wl='-Wl,' + # All OSF/1 code is PIC. 
+ lt_prog_compiler_static='-non_shared' + ;; + + rdos*) + lt_prog_compiler_static='-non_shared' + ;; + + solaris*) + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + case $cc_basename in + f77* | f90* | f95* | sunf77* | sunf90* | sunf95*) + lt_prog_compiler_wl='-Qoption ld ';; + *) + lt_prog_compiler_wl='-Wl,';; + esac + ;; + + sunos4*) + lt_prog_compiler_wl='-Qoption ld ' + lt_prog_compiler_pic='-PIC' + lt_prog_compiler_static='-Bstatic' + ;; + + sysv4 | sysv4.2uw2* | sysv4.3*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + ;; + + sysv4*MP*) + if test -d /usr/nec ;then + lt_prog_compiler_pic='-Kconform_pic' + lt_prog_compiler_static='-Bstatic' + fi + ;; + + sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + ;; + + unicos*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_can_build_shared=no + ;; + + uts4*) + lt_prog_compiler_pic='-pic' + lt_prog_compiler_static='-Bstatic' + ;; + + *) + lt_prog_compiler_can_build_shared=no + ;; + esac + fi + +case $host_os in + # For platforms which do not support PIC, -DPIC is meaningless: + *djgpp*) + lt_prog_compiler_pic= + ;; + *) + lt_prog_compiler_pic="$lt_prog_compiler_pic -DPIC" + ;; +esac + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 +$as_echo_n "checking for $compiler option to produce PIC... " >&6; } +if ${lt_cv_prog_compiler_pic+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_pic=$lt_prog_compiler_pic +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic" >&5 +$as_echo "$lt_cv_prog_compiler_pic" >&6; } +lt_prog_compiler_pic=$lt_cv_prog_compiler_pic + +# +# Check to make sure the PIC flag actually works. 
+# +if test -n "$lt_prog_compiler_pic"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler PIC flag $lt_prog_compiler_pic works" >&5 +$as_echo_n "checking if $compiler PIC flag $lt_prog_compiler_pic works... " >&6; } +if ${lt_cv_prog_compiler_pic_works+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_pic_works=no + ac_outfile=conftest.$ac_objext + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + lt_compiler_flag="$lt_prog_compiler_pic -DPIC" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + # The option is referenced via a variable to avoid confusing sed. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) + (eval "$lt_compile" 2>conftest.err) + ac_status=$? + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s "$ac_outfile"; then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings other than the usual output. + $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if test ! 
-s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then + lt_cv_prog_compiler_pic_works=yes + fi + fi + $RM conftest* + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_works" >&5 +$as_echo "$lt_cv_prog_compiler_pic_works" >&6; } + +if test x"$lt_cv_prog_compiler_pic_works" = xyes; then + case $lt_prog_compiler_pic in + "" | " "*) ;; + *) lt_prog_compiler_pic=" $lt_prog_compiler_pic" ;; + esac +else + lt_prog_compiler_pic= + lt_prog_compiler_can_build_shared=no +fi + +fi + + + + + + + + + + + +# +# Check to make sure the static flag actually works. +# +wl=$lt_prog_compiler_wl eval lt_tmp_static_flag=\"$lt_prog_compiler_static\" +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler static flag $lt_tmp_static_flag works" >&5 +$as_echo_n "checking if $compiler static flag $lt_tmp_static_flag works... " >&6; } +if ${lt_cv_prog_compiler_static_works+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_static_works=no + save_LDFLAGS="$LDFLAGS" + LDFLAGS="$LDFLAGS $lt_tmp_static_flag" + echo "$lt_simple_link_test_code" > conftest.$ac_ext + if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then + # The linker can only warn and ignore the option if not recognized + # So say no if there are warnings + if test -s conftest.err; then + # Append any errors to the config.log. 
+ cat conftest.err 1>&5 + $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if diff conftest.exp conftest.er2 >/dev/null; then + lt_cv_prog_compiler_static_works=yes + fi + else + lt_cv_prog_compiler_static_works=yes + fi + fi + $RM -r conftest* + LDFLAGS="$save_LDFLAGS" + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_static_works" >&5 +$as_echo "$lt_cv_prog_compiler_static_works" >&6; } + +if test x"$lt_cv_prog_compiler_static_works" = xyes; then + : +else + lt_prog_compiler_static= +fi + + + + + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 +$as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; } +if ${lt_cv_prog_compiler_c_o+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_c_o=no + $RM -r conftest 2>/dev/null + mkdir conftest + cd conftest + mkdir out + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + + lt_compiler_flag="-o out/conftest2.$ac_objext" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) + (eval "$lt_compile" 2>out/conftest.err) + ac_status=$? + cat out/conftest.err >&5 + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + if (exit $ac_status) && test -s out/conftest2.$ac_objext + then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings + $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp + $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 + if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then + lt_cv_prog_compiler_c_o=yes + fi + fi + chmod u+w . 2>&5 + $RM conftest* + # SGI C++ compiler will create directory out/ii_files/ for + # template instantiation + test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files + $RM out/* && rmdir out + cd .. + $RM -r conftest + $RM conftest* + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o" >&5 +$as_echo "$lt_cv_prog_compiler_c_o" >&6; } + + + + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 +$as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; } +if ${lt_cv_prog_compiler_c_o+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_c_o=no + $RM -r conftest 2>/dev/null + mkdir conftest + cd conftest + mkdir out + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + + lt_compiler_flag="-o out/conftest2.$ac_objext" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) + (eval "$lt_compile" 2>out/conftest.err) + ac_status=$? + cat out/conftest.err >&5 + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + if (exit $ac_status) && test -s out/conftest2.$ac_objext + then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings + $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp + $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 + if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then + lt_cv_prog_compiler_c_o=yes + fi + fi + chmod u+w . 2>&5 + $RM conftest* + # SGI C++ compiler will create directory out/ii_files/ for + # template instantiation + test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files + $RM out/* && rmdir out + cd .. + $RM -r conftest + $RM conftest* + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o" >&5 +$as_echo "$lt_cv_prog_compiler_c_o" >&6; } + + + + +hard_links="nottested" +if test "$lt_cv_prog_compiler_c_o" = no && test "$need_locks" != no; then + # do not overwrite the value of need_locks provided by the user + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if we can lock with hard links" >&5 +$as_echo_n "checking if we can lock with hard links... " >&6; } + hard_links=yes + $RM conftest* + ln conftest.a conftest.b 2>/dev/null && hard_links=no + touch conftest.a + ln conftest.a conftest.b 2>&5 || hard_links=no + ln conftest.a conftest.b 2>/dev/null && hard_links=no + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $hard_links" >&5 +$as_echo "$hard_links" >&6; } + if test "$hard_links" = no; then + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&5 +$as_echo "$as_me: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&2;} + need_locks=warn + fi +else + need_locks=no +fi + + + + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5 +$as_echo_n "checking whether the $compiler linker ($LD) supports shared libraries... 
" >&6; } + + runpath_var= + allow_undefined_flag= + always_export_symbols=no + archive_cmds= + archive_expsym_cmds= + compiler_needs_object=no + enable_shared_with_static_runtimes=no + export_dynamic_flag_spec= + export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + hardcode_automatic=no + hardcode_direct=no + hardcode_direct_absolute=no + hardcode_libdir_flag_spec= + hardcode_libdir_separator= + hardcode_minus_L=no + hardcode_shlibpath_var=unsupported + inherit_rpath=no + link_all_deplibs=unknown + module_cmds= + module_expsym_cmds= + old_archive_from_new_cmds= + old_archive_from_expsyms_cmds= + thread_safe_flag_spec= + whole_archive_flag_spec= + # include_expsyms should be a list of space-separated symbols to be *always* + # included in the symbol list + include_expsyms= + # exclude_expsyms can be an extended regexp of symbols to exclude + # it will be wrapped by ` (' and `)$', so one must not match beginning or + # end of line. Example: `a|bc|.*d.*' will exclude the symbols `a' and `bc', + # as well as any symbol that contains `d'. + exclude_expsyms='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*' + # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out + # platforms (ab)use it in PIC code, but their linkers get confused if + # the symbol is explicitly referenced. Since portable code cannot + # rely on this symbol name, it's probably fine to never include it in + # preloaded symbol tables. + # Exclude shared library initialization/finalization symbols. + extract_expsyms_cmds= + + case $host_os in + cygwin* | mingw* | pw32* | cegcc*) + # FIXME: the MSVC++ port hasn't been tested in a loooong time + # When not using gcc, we currently assume that we are using + # Microsoft Visual C++. 
+ if test "$GCC" != yes; then + with_gnu_ld=no + fi + ;; + interix*) + # we just hope/assume this is gcc and not c89 (= MSVC++) + with_gnu_ld=yes + ;; + openbsd*) + with_gnu_ld=no + ;; + esac + + ld_shlibs=yes + + # On some targets, GNU ld is compatible enough with the native linker + # that we're better off using the native interface for both. + lt_use_gnu_ld_interface=no + if test "$with_gnu_ld" = yes; then + case $host_os in + aix*) + # The AIX port of GNU ld has always aspired to compatibility + # with the native linker. However, as the warning in the GNU ld + # block says, versions before 2.19.5* couldn't really create working + # shared libraries, regardless of the interface used. + case `$LD -v 2>&1` in + *\ \(GNU\ Binutils\)\ 2.19.5*) ;; + *\ \(GNU\ Binutils\)\ 2.[2-9]*) ;; + *\ \(GNU\ Binutils\)\ [3-9]*) ;; + *) + lt_use_gnu_ld_interface=yes + ;; + esac + ;; + *) + lt_use_gnu_ld_interface=yes + ;; + esac + fi + + if test "$lt_use_gnu_ld_interface" = yes; then + # If archive_cmds runs LD, not CC, wlarc should be empty + wlarc='${wl}' + + # Set some defaults for GNU ld with shared library support. These + # are reset later if shared libraries are not supported. Putting them + # here allows them to be overridden if necessary. + runpath_var=LD_RUN_PATH + hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + export_dynamic_flag_spec='${wl}--export-dynamic' + # ancient GNU ld didn't support --whole-archive et. al. + if $LD --help 2>&1 | $GREP 'no-whole-archive' > /dev/null; then + whole_archive_flag_spec="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' + else + whole_archive_flag_spec= + fi + supports_anon_versioning=no + case `$LD -v 2>&1` in + *GNU\ gold*) supports_anon_versioning=yes ;; + *\ [01].* | *\ 2.[0-9].* | *\ 2.10.*) ;; # catch versions < 2.11 + *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... + *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ... 
+ *\ 2.11.*) ;; # other 2.11 versions + *) supports_anon_versioning=yes ;; + esac + + # See if GNU ld supports shared libraries. + case $host_os in + aix[3-9]*) + # On AIX/PPC, the GNU linker is very broken + if test "$host_cpu" != ia64; then + ld_shlibs=no + cat <<_LT_EOF 1>&2 + +*** Warning: the GNU linker, at least up to release 2.19, is reported +*** to be unable to reliably create shared libraries on AIX. +*** Therefore, libtool is disabling shared libraries support. If you +*** really care for shared libraries, you may want to install binutils +*** 2.20 or above, or modify your PATH so that a non-GNU linker is found. +*** You will then need to restart the configuration process. + +_LT_EOF + fi + ;; + + amigaos*) + case $host_cpu in + powerpc) + # see comment about AmigaOS4 .so support + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds='' + ;; + m68k) + archive_cmds='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' + hardcode_libdir_flag_spec='-L$libdir' + hardcode_minus_L=yes + ;; + esac + ;; + + beos*) + if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + allow_undefined_flag=unsupported + # Joseph Beckenbach says some releases of gcc + # support --undefined. This deserves some investigation. FIXME + archive_cmds='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + else + ld_shlibs=no + fi + ;; + + cygwin* | mingw* | pw32* | cegcc*) + # _LT_TAGVAR(hardcode_libdir_flag_spec, ) is actually meaningless, + # as there is no search path for DLLs. 
+ hardcode_libdir_flag_spec='-L$libdir' + export_dynamic_flag_spec='${wl}--export-all-symbols' + allow_undefined_flag=unsupported + always_export_symbols=no + enable_shared_with_static_runtimes=yes + export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols' + exclude_expsyms='[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname' + + if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + # If the export-symbols file already is a .def file (1st line + # is EXPORTS), use it as is; otherwise, prepend... + archive_expsym_cmds='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then + cp $export_symbols $output_objdir/$soname.def; + else + echo EXPORTS > $output_objdir/$soname.def; + cat $export_symbols >> $output_objdir/$soname.def; + fi~ + $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + else + ld_shlibs=no + fi + ;; + + haiku*) + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + link_all_deplibs=yes + ;; + + interix[3-9]*) + hardcode_direct=no + hardcode_shlibpath_var=no + hardcode_libdir_flag_spec='${wl}-rpath,$libdir' + export_dynamic_flag_spec='${wl}-E' + # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. + # Instead, shared libraries are loaded at an image base (0x10000000 by + # default) and relocated if they conflict, which is a slow very memory + # consuming and fragmenting process. To avoid this, we pick a random, + # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link + # time. 
Moving up from 0x10000000 also allows more sbrk(2) space. + archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + archive_expsym_cmds='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + ;; + + gnu* | linux* | tpf* | k*bsd*-gnu | kopensolaris*-gnu) + tmp_diet=no + if test "$host_os" = linux-dietlibc; then + case $cc_basename in + diet\ *) tmp_diet=yes;; # linux-dietlibc with static linking (!diet-dyn) + esac + fi + if $LD --help 2>&1 | $EGREP ': supported targets:.* elf' > /dev/null \ + && test "$tmp_diet" = no + then + tmp_addflag=' $pic_flag' + tmp_sharedflag='-shared' + case $cc_basename,$host_cpu in + pgcc*) # Portland Group C compiler + whole_archive_flag_spec='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' + tmp_addflag=' $pic_flag' + ;; + pgf77* | pgf90* | pgf95* | pgfortran*) + # Portland Group f77 and f90 compilers + whole_archive_flag_spec='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' + tmp_addflag=' $pic_flag -Mnomain' ;; + ecc*,ia64* | icc*,ia64*) # Intel C compiler on ia64 + tmp_addflag=' -i_dynamic' ;; + efc*,ia64* | ifort*,ia64*) # Intel Fortran compiler on ia64 + tmp_addflag=' -i_dynamic -nofor_main' ;; + ifc* | ifort*) # Intel Fortran compiler + tmp_addflag=' -nofor_main' ;; + lf95*) # Lahey Fortran 8.1 + whole_archive_flag_spec= + tmp_sharedflag='--shared' ;; + xl[cC]* | bgxl[cC]* | mpixl[cC]*) # IBM XL C 8.0 on PPC (deal with xlf below) + 
tmp_sharedflag='-qmkshrobj' + tmp_addflag= ;; + nvcc*) # Cuda Compiler Driver 2.2 + whole_archive_flag_spec='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' + compiler_needs_object=yes + ;; + esac + case `$CC -V 2>&1 | sed 5q` in + *Sun\ C*) # Sun C 5.9 + whole_archive_flag_spec='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' + compiler_needs_object=yes + tmp_sharedflag='-G' ;; + *Sun\ F*) # Sun Fortran 8.3 + tmp_sharedflag='-G' ;; + esac + archive_cmds='$CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + + if test "x$supports_anon_versioning" = xyes; then + archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + echo "local: *; };" >> $output_objdir/$libname.ver~ + $CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' + fi + + case $cc_basename in + xlf* | bgf* | bgxlf* | mpixlf*) + # IBM XL Fortran 10.1 on PPC cannot create shared libs itself + whole_archive_flag_spec='--whole-archive$convenience --no-whole-archive' + hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + archive_cmds='$LD -shared $libobjs $deplibs $linker_flags -soname $soname -o $lib' + if test "x$supports_anon_versioning" = xyes; then + archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + echo "local: *; };" >> $output_objdir/$libname.ver~ + $LD -shared $libobjs $deplibs $linker_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' + fi + 
;; + esac + else + ld_shlibs=no + fi + ;; + + netbsd*) + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + archive_cmds='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' + wlarc= + else + archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + fi + ;; + + solaris*) + if $LD -v 2>&1 | $GREP 'BFD 2\.8' > /dev/null; then + ld_shlibs=no + cat <<_LT_EOF 1>&2 + +*** Warning: The releases 2.8.* of the GNU linker cannot reliably +*** create shared libraries on Solaris systems. Therefore, libtool +*** is disabling shared libraries support. We urge you to upgrade GNU +*** binutils to release 2.9.1 or newer. Another option is to modify +*** your PATH or compiler configuration so that the native linker is +*** used, and then restart. + +_LT_EOF + elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs=no + fi + ;; + + sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*) + case `$LD -v 2>&1` in + *\ [01].* | *\ 2.[0-9].* | *\ 2.1[0-5].*) + ld_shlibs=no + cat <<_LT_EOF 1>&2 + +*** Warning: Releases of the GNU linker prior to 2.16.91.0.3 can not +*** reliably create shared libraries on SCO systems. Therefore, libtool +*** is disabling shared libraries support. We urge you to upgrade GNU +*** binutils to release 2.16.91.0.3 or newer. Another option is to modify +*** your PATH or compiler configuration so that the native linker is +*** used, and then restart. 
+ +_LT_EOF + ;; + *) + # For security reasons, it is highly recommended that you always + # use absolute paths for naming shared libraries, and exclude the + # DT_RUNPATH tag from executables and libraries. But doing so + # requires that you compile everything twice, which is a pain. + if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs=no + fi + ;; + esac + ;; + + sunos4*) + archive_cmds='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags' + wlarc= + hardcode_direct=yes + hardcode_shlibpath_var=no + ;; + + *) + if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs=no + fi + ;; + esac + + if test "$ld_shlibs" = no; then + runpath_var= + hardcode_libdir_flag_spec= + export_dynamic_flag_spec= + whole_archive_flag_spec= + fi + else + # PORTME fill in a description of your system's linker (not GNU ld) + case $host_os in + aix3*) + allow_undefined_flag=unsupported + always_export_symbols=yes + archive_expsym_cmds='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname' + # Note: this linker hardcodes the directories in LIBPATH if there + # are no directories specified by -L. 
+ hardcode_minus_L=yes + if test "$GCC" = yes && test -z "$lt_prog_compiler_static"; then + # Neither direct hardcoding nor static linking is supported with a + # broken collect2. + hardcode_direct=unsupported + fi + ;; + + aix[4-9]*) + if test "$host_cpu" = ia64; then + # On IA64, the linker does run time linking by default, so we don't + # have to do anything special. + aix_use_runtimelinking=no + exp_sym_flag='-Bexport' + no_entry_flag="" + else + # If we're using GNU nm, then we don't want the "-C" option. + # -C means demangle to AIX nm, but means don't demangle with GNU nm + # Also, AIX nm treats weak defined symbols like other global + # defined symbols, whereas GNU nm marks them as "W". + if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then + export_symbols_cmds='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && (substr(\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' + else + export_symbols_cmds='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && (substr(\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' + fi + aix_use_runtimelinking=no + + # Test if we are trying to use run time linking or normal + # AIX style linking. If -brtl is somewhere in LDFLAGS, we + # need to do runtime linking. + case $host_os in aix4.[23]|aix4.[23].*|aix[5-9]*) + for ld_flag in $LDFLAGS; do + if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then + aix_use_runtimelinking=yes + break + fi + done + ;; + esac + + exp_sym_flag='-bexport' + no_entry_flag='-bnoentry' + fi + + # When large executables or shared objects are built, AIX ld can + # have problems creating the table of contents. If linking a library + # or program results in "error TOC overflow" add -mminimal-toc to + # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not + # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. 
+ + archive_cmds='' + hardcode_direct=yes + hardcode_direct_absolute=yes + hardcode_libdir_separator=':' + link_all_deplibs=yes + file_list_spec='${wl}-f,' + + if test "$GCC" = yes; then + case $host_os in aix4.[012]|aix4.[012].*) + # We only want to do this on AIX 4.2 and lower, the check + # below for broken collect2 doesn't work under 4.3+ + collect2name=`${CC} -print-prog-name=collect2` + if test -f "$collect2name" && + strings "$collect2name" | $GREP resolve_lib_name >/dev/null + then + # We have reworked collect2 + : + else + # We have old collect2 + hardcode_direct=unsupported + # It fails to find uninstalled libraries when the uninstalled + # path is not listed in the libpath. Setting hardcode_minus_L + # to unsupported forces relinking + hardcode_minus_L=yes + hardcode_libdir_flag_spec='-L$libdir' + hardcode_libdir_separator= + fi + ;; + esac + shared_flag='-shared' + if test "$aix_use_runtimelinking" = yes; then + shared_flag="$shared_flag "'${wl}-G' + fi + else + # not using gcc + if test "$host_cpu" = ia64; then + # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release + # chokes on -Wl,-G. The following line is correct: + shared_flag='-G' + else + if test "$aix_use_runtimelinking" = yes; then + shared_flag='${wl}-G' + else + shared_flag='${wl}-bM:SRE' + fi + fi + fi + + export_dynamic_flag_spec='${wl}-bexpall' + # It seems that -bexpall does not export symbols beginning with + # underscore (_), so it is better to generate a list of symbols to export. + always_export_symbols=yes + if test "$aix_use_runtimelinking" = yes; then + # Warning - without using the other runtime loading flags (-brtl), + # -berok will link without error, but may produce a broken library. + allow_undefined_flag='-berok' + # Determine the default libpath from the value encoded in an + # empty executable. 
+ if test "${lt_cv_aix_libpath+set}" = set; then + aix_libpath=$lt_cv_aix_libpath +else + if ${lt_cv_aix_libpath_+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + + lt_aix_libpath_sed=' + /Import File Strings/,/^$/ { + /^0/ { + s/^0 *\([^ ]*\) *$/\1/ + p + } + }' + lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + # Check for a 64-bit object if we didn't find anything. + if test -z "$lt_cv_aix_libpath_"; then + lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + fi +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + if test -z "$lt_cv_aix_libpath_"; then + lt_cv_aix_libpath_="/usr/lib:/lib" + fi + +fi + + aix_libpath=$lt_cv_aix_libpath_ +fi + + hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" + archive_expsym_cmds='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then func_echo_all "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" + else + if test "$host_cpu" = ia64; then + hardcode_libdir_flag_spec='${wl}-R $libdir:/usr/lib:/lib' + allow_undefined_flag="-z nodefs" + archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols" + else + # Determine the default libpath from the value encoded in an + # empty executable. + if test "${lt_cv_aix_libpath+set}" = set; then + aix_libpath=$lt_cv_aix_libpath +else + if ${lt_cv_aix_libpath_+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + + lt_aix_libpath_sed=' + /Import File Strings/,/^$/ { + /^0/ { + s/^0 *\([^ ]*\) *$/\1/ + p + } + }' + lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + # Check for a 64-bit object if we didn't find anything. + if test -z "$lt_cv_aix_libpath_"; then + lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + fi +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + if test -z "$lt_cv_aix_libpath_"; then + lt_cv_aix_libpath_="/usr/lib:/lib" + fi + +fi + + aix_libpath=$lt_cv_aix_libpath_ +fi + + hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" + # Warning - without using the other run time loading flags, + # -berok will link without error, but may produce a broken library. + no_undefined_flag=' ${wl}-bernotok' + allow_undefined_flag=' ${wl}-berok' + if test "$with_gnu_ld" = yes; then + # We only use this code for GNU lds that support --whole-archive. + whole_archive_flag_spec='${wl}--whole-archive$convenience ${wl}--no-whole-archive' + else + # Exported symbols can be pulled into shared objects from archives + whole_archive_flag_spec='$convenience' + fi + archive_cmds_need_lc=yes + # This is similar to how AIX traditionally builds its shared libraries. 
+ archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' + fi + fi + ;; + + amigaos*) + case $host_cpu in + powerpc) + # see comment about AmigaOS4 .so support + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds='' + ;; + m68k) + archive_cmds='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' + hardcode_libdir_flag_spec='-L$libdir' + hardcode_minus_L=yes + ;; + esac + ;; + + bsdi[45]*) + export_dynamic_flag_spec=-rdynamic + ;; + + cygwin* | mingw* | pw32* | cegcc*) + # When not using gcc, we currently assume that we are using + # Microsoft Visual C++. + # hardcode_libdir_flag_spec is actually meaningless, as there is + # no search path for DLLs. + case $cc_basename in + cl*) + # Native MSVC + hardcode_libdir_flag_spec=' ' + allow_undefined_flag=unsupported + always_export_symbols=yes + file_list_spec='@' + # Tell ltmain to make .lib files, not .a files. + libext=lib + # Tell ltmain to make .dll files, not .so files. + shrext_cmds=".dll" + # FIXME: Setting linknames here is a bad hack. 
+ archive_cmds='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-dll~linknames=' + archive_expsym_cmds='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then + sed -n -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' -e '1\\\!p' < $export_symbols > $output_objdir/$soname.exp; + else + sed -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' < $export_symbols > $output_objdir/$soname.exp; + fi~ + $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ + linknames=' + # The linker will not automatically build a static lib if we build a DLL. + # _LT_TAGVAR(old_archive_from_new_cmds, )='true' + enable_shared_with_static_runtimes=yes + exclude_expsyms='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*' + export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1,DATA/'\'' | $SED -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols' + # Don't use ranlib + old_postinstall_cmds='chmod 644 $oldlib' + postlink_cmds='lt_outputfile="@OUTPUT@"~ + lt_tool_outputfile="@TOOL_OUTPUT@"~ + case $lt_outputfile in + *.exe|*.EXE) ;; + *) + lt_outputfile="$lt_outputfile.exe" + lt_tool_outputfile="$lt_tool_outputfile.exe" + ;; + esac~ + if test "$MANIFEST_TOOL" != ":" && test -f "$lt_outputfile.manifest"; then + $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; + $RM "$lt_outputfile.manifest"; + fi' + ;; + *) + # Assume MSVC wrapper + hardcode_libdir_flag_spec=' ' + allow_undefined_flag=unsupported + # Tell ltmain to make .lib files, not .a files. + libext=lib + # Tell ltmain to make .dll files, not .so files. + shrext_cmds=".dll" + # FIXME: Setting linknames here is a bad hack. 
+ archive_cmds='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' + # The linker will automatically build a .lib file if we build a DLL. + old_archive_from_new_cmds='true' + # FIXME: Should let the user specify the lib program. + old_archive_cmds='lib -OUT:$oldlib$oldobjs$old_deplibs' + enable_shared_with_static_runtimes=yes + ;; + esac + ;; + + darwin* | rhapsody*) + + + archive_cmds_need_lc=no + hardcode_direct=no + hardcode_automatic=yes + hardcode_shlibpath_var=unsupported + if test "$lt_cv_ld_force_load" = "yes"; then + whole_archive_flag_spec='`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience ${wl}-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`' + + else + whole_archive_flag_spec='' + fi + link_all_deplibs=yes + allow_undefined_flag="$_lt_dar_allow_undefined" + case $cc_basename in + ifort*) _lt_dar_can_shared=yes ;; + *) _lt_dar_can_shared=$GCC ;; + esac + if test "$_lt_dar_can_shared" = "yes"; then + output_verbose_link_cmd=func_echo_all + archive_cmds="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod${_lt_dsymutil}" + module_cmds="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dsymutil}" + archive_expsym_cmds="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring ${_lt_dar_single_mod}${_lt_dar_export_syms}${_lt_dsymutil}" + module_expsym_cmds="sed -e 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dar_export_syms}${_lt_dsymutil}" + + else + ld_shlibs=no + fi + + ;; + + dgux*) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs 
$linker_flags' + hardcode_libdir_flag_spec='-L$libdir' + hardcode_shlibpath_var=no + ;; + + # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor + # support. Future versions do this automatically, but an explicit c++rt0.o + # does not break anything, and helps significantly (at the cost of a little + # extra space). + freebsd2.2*) + archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o' + hardcode_libdir_flag_spec='-R$libdir' + hardcode_direct=yes + hardcode_shlibpath_var=no + ;; + + # Unfortunately, older versions of FreeBSD 2 do not have this feature. + freebsd2.*) + archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct=yes + hardcode_minus_L=yes + hardcode_shlibpath_var=no + ;; + + # FreeBSD 3 and greater uses gcc -shared to do shared libraries. + freebsd* | dragonfly*) + archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + hardcode_libdir_flag_spec='-R$libdir' + hardcode_direct=yes + hardcode_shlibpath_var=no + ;; + + hpux9*) + if test "$GCC" = yes; then + archive_cmds='$RM $output_objdir/$soname~$CC -shared $pic_flag ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + else + archive_cmds='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + fi + hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' + hardcode_libdir_separator=: + hardcode_direct=yes + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. 
+ hardcode_minus_L=yes + export_dynamic_flag_spec='${wl}-E' + ;; + + hpux10*) + if test "$GCC" = yes && test "$with_gnu_ld" = no; then + archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' + fi + if test "$with_gnu_ld" = no; then + hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' + hardcode_libdir_separator=: + hardcode_direct=yes + hardcode_direct_absolute=yes + export_dynamic_flag_spec='${wl}-E' + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. + hardcode_minus_L=yes + fi + ;; + + hpux11*) + if test "$GCC" = yes && test "$with_gnu_ld" = no; then + case $host_cpu in + hppa*64*) + archive_cmds='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + ia64*) + archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + else + case $host_cpu in + hppa*64*) + archive_cmds='$CC -b ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + ia64*) + archive_cmds='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + + # Older versions of the 11.00 compiler do not understand -b yet + # (HP92453-01 A.11.01.20 doesn't, HP92453-01 B.11.X.35175-35176.GP does) + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CC understands -b" >&5 +$as_echo_n "checking if $CC understands -b... 
" >&6; } +if ${lt_cv_prog_compiler__b+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler__b=no + save_LDFLAGS="$LDFLAGS" + LDFLAGS="$LDFLAGS -b" + echo "$lt_simple_link_test_code" > conftest.$ac_ext + if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then + # The linker can only warn and ignore the option if not recognized + # So say no if there are warnings + if test -s conftest.err; then + # Append any errors to the config.log. + cat conftest.err 1>&5 + $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if diff conftest.exp conftest.er2 >/dev/null; then + lt_cv_prog_compiler__b=yes + fi + else + lt_cv_prog_compiler__b=yes + fi + fi + $RM -r conftest* + LDFLAGS="$save_LDFLAGS" + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler__b" >&5 +$as_echo "$lt_cv_prog_compiler__b" >&6; } + +if test x"$lt_cv_prog_compiler__b" = xyes; then + archive_cmds='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' +else + archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' +fi + + ;; + esac + fi + if test "$with_gnu_ld" = no; then + hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' + hardcode_libdir_separator=: + + case $host_cpu in + hppa*64*|ia64*) + hardcode_direct=no + hardcode_shlibpath_var=no + ;; + *) + hardcode_direct=yes + hardcode_direct_absolute=yes + export_dynamic_flag_spec='${wl}-E' + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. 
+ hardcode_minus_L=yes + ;; + esac + fi + ;; + + irix5* | irix6* | nonstopux*) + if test "$GCC" = yes; then + archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + # Try to use the -exported_symbol ld option, if it does not + # work, assume that -exports_file does not work either and + # implicitly export all symbols. + # This should be the same for all languages, so no per-tag cache variable. + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $host_os linker accepts -exported_symbol" >&5 +$as_echo_n "checking whether the $host_os linker accepts -exported_symbol... " >&6; } +if ${lt_cv_irix_exported_symbol+:} false; then : + $as_echo_n "(cached) " >&6 +else + save_LDFLAGS="$LDFLAGS" + LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +int foo (void) { return 0; } +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + lt_cv_irix_exported_symbol=yes +else + lt_cv_irix_exported_symbol=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + LDFLAGS="$save_LDFLAGS" +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_irix_exported_symbol" >&5 +$as_echo "$lt_cv_irix_exported_symbol" >&6; } + if test "$lt_cv_irix_exported_symbol" = yes; then + archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib' + fi + else + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' + archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -exports_file $export_symbols -o $lib' + fi + archive_cmds_need_lc='no' + hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator=: + inherit_rpath=yes + link_all_deplibs=yes + ;; + + netbsd*) + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out + else + archive_cmds='$LD -shared -o $lib $libobjs $deplibs $linker_flags' # ELF + fi + hardcode_libdir_flag_spec='-R$libdir' + hardcode_direct=yes + hardcode_shlibpath_var=no + ;; + + newsos6) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct=yes + hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator=: + hardcode_shlibpath_var=no + ;; + + *nto* | *qnx*) + ;; + + openbsd*) + if test -f /usr/libexec/ld.so; then + 
hardcode_direct=yes + hardcode_shlibpath_var=no + hardcode_direct_absolute=yes + if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-retain-symbols-file,$export_symbols' + hardcode_libdir_flag_spec='${wl}-rpath,$libdir' + export_dynamic_flag_spec='${wl}-E' + else + case $host_os in + openbsd[01].* | openbsd2.[0-7] | openbsd2.[0-7].*) + archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' + hardcode_libdir_flag_spec='-R$libdir' + ;; + *) + archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + hardcode_libdir_flag_spec='${wl}-rpath,$libdir' + ;; + esac + fi + else + ld_shlibs=no + fi + ;; + + os2*) + hardcode_libdir_flag_spec='-L$libdir' + hardcode_minus_L=yes + allow_undefined_flag=unsupported + archive_cmds='$ECHO "LIBRARY $libname INITINSTANCE" > $output_objdir/$libname.def~$ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~echo DATA >> $output_objdir/$libname.def~echo " SINGLE NONSHARED" >> $output_objdir/$libname.def~echo EXPORTS >> $output_objdir/$libname.def~emxexp $libobjs >> $output_objdir/$libname.def~$CC -Zdll -Zcrtdll -o $lib $libobjs $deplibs $compiler_flags $output_objdir/$libname.def' + old_archive_from_new_cmds='emximp -o $output_objdir/$libname.a $output_objdir/$libname.def' + ;; + + osf3*) + if test "$GCC" = yes; then + allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*' + archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + else + allow_undefined_flag=' -expect_unresolved \*' + archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags 
-soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' + fi + archive_cmds_need_lc='no' + hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator=: + ;; + + osf4* | osf5*) # as osf3* with the addition of -msym flag + if test "$GCC" = yes; then + allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*' + archive_cmds='$CC -shared${allow_undefined_flag} $pic_flag $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + else + allow_undefined_flag=' -expect_unresolved \*' + archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' + archive_expsym_cmds='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; printf "%s\\n" "-hidden">> $lib.exp~ + $CC -shared${allow_undefined_flag} ${wl}-input ${wl}$lib.exp $compiler_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib~$RM $lib.exp' + + # Both c and cxx compiler support -rpath directly + hardcode_libdir_flag_spec='-rpath $libdir' + fi + archive_cmds_need_lc='no' + hardcode_libdir_separator=: + ;; + + solaris*) + no_undefined_flag=' -z defs' + if test "$GCC" = yes; then + wlarc='${wl}' + archive_cmds='$CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-M 
${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' + else + case `$CC -V 2>&1` in + *"Compilers 5.0"*) + wlarc='' + archive_cmds='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linker_flags' + archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$RM $lib.exp' + ;; + *) + wlarc='${wl}' + archive_cmds='$CC -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' + ;; + esac + fi + hardcode_libdir_flag_spec='-R$libdir' + hardcode_shlibpath_var=no + case $host_os in + solaris2.[0-5] | solaris2.[0-5].*) ;; + *) + # The compiler driver will combine and reorder linker options, + # but understands `-z linker_flag'. GCC discards it without `$wl', + # but is careful enough not to reorder. + # Supported since Solaris 2.6 (maybe 2.5.1?) + if test "$GCC" = yes; then + whole_archive_flag_spec='${wl}-z ${wl}allextract$convenience ${wl}-z ${wl}defaultextract' + else + whole_archive_flag_spec='-z allextract$convenience -z defaultextract' + fi + ;; + esac + link_all_deplibs=yes + ;; + + sunos4*) + if test "x$host_vendor" = xsequent; then + # Use $CC to link under sequent, because it throws in some extra .o + # files that make .init and .fini sections work. 
+ archive_cmds='$CC -G ${wl}-h $soname -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags' + fi + hardcode_libdir_flag_spec='-L$libdir' + hardcode_direct=yes + hardcode_minus_L=yes + hardcode_shlibpath_var=no + ;; + + sysv4) + case $host_vendor in + sni) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct=yes # is this really true??? + ;; + siemens) + ## LD is ld it makes a PLAMLIB + ## CC just makes a GrossModule. + archive_cmds='$LD -G -o $lib $libobjs $deplibs $linker_flags' + reload_cmds='$CC -r -o $output$reload_objs' + hardcode_direct=no + ;; + motorola) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct=no #Motorola manual says yes, but my tests say they lie + ;; + esac + runpath_var='LD_RUN_PATH' + hardcode_shlibpath_var=no + ;; + + sysv4.3*) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_shlibpath_var=no + export_dynamic_flag_spec='-Bexport' + ;; + + sysv4*MP*) + if test -d /usr/nec; then + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_shlibpath_var=no + runpath_var=LD_RUN_PATH + hardcode_runpath_var=yes + ld_shlibs=yes + fi + ;; + + sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7* | sco3.2v5.0.[024]*) + no_undefined_flag='${wl}-z,text' + archive_cmds_need_lc=no + hardcode_shlibpath_var=no + runpath_var='LD_RUN_PATH' + + if test "$GCC" = yes; then + archive_cmds='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + fi + ;; + + sysv5* | sco3.2v5* | 
sco5v6*) + # Note: We can NOT use -z defs as we might desire, because we do not + # link with -lc, and that would cause any symbols used from libc to + # always be unresolved, which means just about no library would + # ever link correctly. If we're not using GNU ld we use -z text + # though, which does catch some bad symbols but isn't as heavy-handed + # as -z defs. + no_undefined_flag='${wl}-z,text' + allow_undefined_flag='${wl}-z,nodefs' + archive_cmds_need_lc=no + hardcode_shlibpath_var=no + hardcode_libdir_flag_spec='${wl}-R,$libdir' + hardcode_libdir_separator=':' + link_all_deplibs=yes + export_dynamic_flag_spec='${wl}-Bexport' + runpath_var='LD_RUN_PATH' + + if test "$GCC" = yes; then + archive_cmds='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + fi + ;; + + uts4*) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_libdir_flag_spec='-L$libdir' + hardcode_shlibpath_var=no + ;; + + *) + ld_shlibs=no + ;; + esac + + if test x$host_vendor = xsni; then + case $host in + sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*) + export_dynamic_flag_spec='${wl}-Blargedynsym' + ;; + esac + fi + fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs" >&5 +$as_echo "$ld_shlibs" >&6; } +test "$ld_shlibs" = no && can_build_shared=no + +with_gnu_ld=$with_gnu_ld + + + + + + + + + + + + + + + +# +# Do we need to explicitly link libc? +# +case "x$archive_cmds_need_lc" in +x|xyes) + # Assume -lc should be added + archive_cmds_need_lc=yes + + if test "$enable_shared" = yes && test "$GCC" = yes; then + case $archive_cmds in + *'~'*) + # FIXME: we may have to deal with multi-command sequences. 
+ ;; + '$CC '*) + # Test whether the compiler implicitly links with -lc since on some + # systems, -lgcc has to come before -lc. If gcc already passes -lc + # to ld, don't add -lc before -lgcc. + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether -lc should be explicitly linked in" >&5 +$as_echo_n "checking whether -lc should be explicitly linked in... " >&6; } +if ${lt_cv_archive_cmds_need_lc+:} false; then : + $as_echo_n "(cached) " >&6 +else + $RM conftest* + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } 2>conftest.err; then + soname=conftest + lib=conftest + libobjs=conftest.$ac_objext + deplibs= + wl=$lt_prog_compiler_wl + pic_flag=$lt_prog_compiler_pic + compiler_flags=-v + linker_flags=-v + verstring= + output_objdir=. + libname=conftest + lt_save_allow_undefined_flag=$allow_undefined_flag + allow_undefined_flag= + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$archive_cmds 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1\""; } >&5 + (eval $archive_cmds 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 + test $ac_status = 0; } + then + lt_cv_archive_cmds_need_lc=no + else + lt_cv_archive_cmds_need_lc=yes + fi + allow_undefined_flag=$lt_save_allow_undefined_flag + else + cat conftest.err 1>&5 + fi + $RM conftest* + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_archive_cmds_need_lc" >&5 +$as_echo "$lt_cv_archive_cmds_need_lc" >&6; } + archive_cmds_need_lc=$lt_cv_archive_cmds_need_lc + ;; + esac + fi + ;; +esac + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking dynamic linker characteristics" >&5 +$as_echo_n "checking dynamic linker characteristics... " >&6; } + +if test "$GCC" = yes; then + case $host_os in + darwin*) lt_awk_arg="/^libraries:/,/LR/" ;; + *) lt_awk_arg="/^libraries:/" ;; + esac + case $host_os in + mingw* | cegcc*) lt_sed_strip_eq="s,=\([A-Za-z]:\),\1,g" ;; + *) lt_sed_strip_eq="s,=/,/,g" ;; + esac + lt_search_path_spec=`$CC -print-search-dirs | awk $lt_awk_arg | $SED -e "s/^libraries://" -e $lt_sed_strip_eq` + case $lt_search_path_spec in + *\;*) + # if the path contains ";" then we assume it to be the separator + # otherwise default to the standard path separator (i.e. ":") - it is + # assumed that no part of a normal pathname contains ";" but that should + # okay in the real world where ";" in dirpaths is itself problematic. + lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED 's/;/ /g'` + ;; + *) + lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED "s/$PATH_SEPARATOR/ /g"` + ;; + esac + # Ok, now we have the path, separated by spaces, we can step through it + # and add multilib dir if necessary. 
+ lt_tmp_lt_search_path_spec= + lt_multi_os_dir=`$CC $CPPFLAGS $CFLAGS $LDFLAGS -print-multi-os-directory 2>/dev/null` + for lt_sys_path in $lt_search_path_spec; do + if test -d "$lt_sys_path/$lt_multi_os_dir"; then + lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path/$lt_multi_os_dir" + else + test -d "$lt_sys_path" && \ + lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path" + fi + done + lt_search_path_spec=`$ECHO "$lt_tmp_lt_search_path_spec" | awk ' +BEGIN {RS=" "; FS="/|\n";} { + lt_foo=""; + lt_count=0; + for (lt_i = NF; lt_i > 0; lt_i--) { + if ($lt_i != "" && $lt_i != ".") { + if ($lt_i == "..") { + lt_count++; + } else { + if (lt_count == 0) { + lt_foo="/" $lt_i lt_foo; + } else { + lt_count--; + } + } + } + } + if (lt_foo != "") { lt_freq[lt_foo]++; } + if (lt_freq[lt_foo] == 1) { print lt_foo; } +}'` + # AWK program above erroneously prepends '/' to C:/dos/paths + # for these hosts. + case $host_os in + mingw* | cegcc*) lt_search_path_spec=`$ECHO "$lt_search_path_spec" |\ + $SED 's,/\([A-Za-z]:\),\1,g'` ;; + esac + sys_lib_search_path_spec=`$ECHO "$lt_search_path_spec" | $lt_NL2SP` +else + sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" +fi +library_names_spec= +libname_spec='lib$name' +soname_spec= +shrext_cmds=".so" +postinstall_cmds= +postuninstall_cmds= +finish_cmds= +finish_eval= +shlibpath_var= +shlibpath_overrides_runpath=unknown +version_type=none +dynamic_linker="$host_os ld.so" +sys_lib_dlsearch_path_spec="/lib /usr/lib" +need_lib_prefix=unknown +hardcode_into_libs=no + +# when you set need_version to no, make sure it does not cause -set_version +# flags to be left without arguments +need_version=unknown + +case $host_os in +aix3*) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='${libname}${release}${shared_ext}$versuffix $libname.a' + shlibpath_var=LIBPATH + + # AIX 3 has no versioning support, so we append a major version to the name. 
+ soname_spec='${libname}${release}${shared_ext}$major' + ;; + +aix[4-9]*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + hardcode_into_libs=yes + if test "$host_cpu" = ia64; then + # AIX 5 supports IA64 + library_names_spec='${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext}$versuffix $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + else + # With GCC up to 2.95.x, collect2 would create an import file + # for dependence libraries. The import file would start with + # the line `#! .'. This would cause the generated library to + # depend on `.', always an invalid library. This was fixed in + # development snapshots of GCC prior to 3.0. + case $host_os in + aix4 | aix4.[01] | aix4.[01].*) + if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' + echo ' yes ' + echo '#endif'; } | ${CC} -E - | $GREP yes > /dev/null; then + : + else + can_build_shared=no + fi + ;; + esac + # AIX (on Power*) has no versioning support, so currently we can not hardcode correct + # soname into executable. Probably we can add versioning support to + # collect2, so additional links can be useful in future. + if test "$aix_use_runtimelinking" = yes; then + # If using run time linking (on AIX 4.2 or later) use lib.so + # instead of lib.a to let people know that these are not + # typical AIX shared libraries. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + else + # We preserve .a as extension for shared libraries through AIX4.2 + # and later when we are not doing run time linking. + library_names_spec='${libname}${release}.a $libname.a' + soname_spec='${libname}${release}${shared_ext}$major' + fi + shlibpath_var=LIBPATH + fi + ;; + +amigaos*) + case $host_cpu in + powerpc) + # Since July 2007 AmigaOS4 officially supports .so libraries. 
+ # When compiling the executable, add -use-dynld -Lsobjs: to the compileline. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + ;; + m68k) + library_names_spec='$libname.ixlibrary $libname.a' + # Create ${libname}_ixlibrary.a entries in /sys/libs. + finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; test $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' + ;; + esac + ;; + +beos*) + library_names_spec='${libname}${shared_ext}' + dynamic_linker="$host_os ld.so" + shlibpath_var=LIBRARY_PATH + ;; + +bsdi[45]*) + version_type=linux # correct to gnu/linux during the next big refactor + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" + sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" + # the default ld.so.conf also contains /usr/contrib/lib and + # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow + # libtool to hard-code these into programs + ;; + +cygwin* | mingw* | pw32* | cegcc*) + version_type=windows + shrext_cmds=".dll" + need_version=no + need_lib_prefix=no + + case $GCC,$cc_basename in + yes,*) + # gcc + library_names_spec='$libname.dll.a' + # DLL is installed to $(libdir)/../bin by postinstall_cmds + postinstall_cmds='base_file=`basename \${file}`~ + dlpath=`$SHELL 2>&1 -c '\''. 
$dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname~ + chmod a+x \$dldir/$dlname~ + if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then + eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; + fi' + postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ + dlpath=$dir/\$dldll~ + $RM \$dlpath' + shlibpath_overrides_runpath=yes + + case $host_os in + cygwin*) + # Cygwin DLLs use 'cyg' prefix rather than 'lib' + soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + + sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/lib/w32api" + ;; + mingw* | cegcc*) + # MinGW DLLs use traditional 'lib' prefix + soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + ;; + pw32*) + # pw32 DLLs use 'pw' prefix rather than 'lib' + library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + ;; + esac + dynamic_linker='Win32 ld.exe' + ;; + + *,cl*) + # Native MSVC + libname_spec='$name' + soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + library_names_spec='${libname}.dll.lib' + + case $build_os in + mingw*) + sys_lib_search_path_spec= + lt_save_ifs=$IFS + IFS=';' + for lt_path in $LIB + do + IFS=$lt_save_ifs + # Let DOS variable expansion print the short 8.3 style file name. + lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"` + sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path" + done + IFS=$lt_save_ifs + # Convert to MSYS style. + sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([a-zA-Z]\\):| /\\1|g' -e 's|^ ||'` + ;; + cygwin*) + # Convert to unix form, then to dos form, then back to unix form + # but this time dos style (no spaces!) 
so that the unix form looks + # like /cygdrive/c/PROGRA~1:/cygdr... + sys_lib_search_path_spec=`cygpath --path --unix "$LIB"` + sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null` + sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + ;; + *) + sys_lib_search_path_spec="$LIB" + if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then + # It is most probably a Windows format PATH. + sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` + else + sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + fi + # FIXME: find the short name or the path components, as spaces are + # common. (e.g. "Program Files" -> "PROGRA~1") + ;; + esac + + # DLL is installed to $(libdir)/../bin by postinstall_cmds + postinstall_cmds='base_file=`basename \${file}`~ + dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname' + postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ + dlpath=$dir/\$dldll~ + $RM \$dlpath' + shlibpath_overrides_runpath=yes + dynamic_linker='Win32 link.exe' + ;; + + *) + # Assume MSVC wrapper + library_names_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext} $libname.lib' + dynamic_linker='Win32 ld.exe' + ;; + esac + # FIXME: first we should search . 
and the directory the executable is in + shlibpath_var=PATH + ;; + +darwin* | rhapsody*) + dynamic_linker="$host_os dyld" + version_type=darwin + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${major}$shared_ext ${libname}$shared_ext' + soname_spec='${libname}${release}${major}$shared_ext' + shlibpath_overrides_runpath=yes + shlibpath_var=DYLD_LIBRARY_PATH + shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' + + sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/local/lib" + sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' + ;; + +dgux*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname$shared_ext' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +freebsd* | dragonfly*) + # DragonFly does not have aout. When/if they implement a new + # versioning mechanism, adjust this. 
+ if test -x /usr/bin/objformat; then + objformat=`/usr/bin/objformat` + else + case $host_os in + freebsd[23].*) objformat=aout ;; + *) objformat=elf ;; + esac + fi + version_type=freebsd-$objformat + case $version_type in + freebsd-elf*) + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' + need_version=no + need_lib_prefix=no + ;; + freebsd-*) + library_names_spec='${libname}${release}${shared_ext}$versuffix $libname${shared_ext}$versuffix' + need_version=yes + ;; + esac + shlibpath_var=LD_LIBRARY_PATH + case $host_os in + freebsd2.*) + shlibpath_overrides_runpath=yes + ;; + freebsd3.[01]* | freebsdelf3.[01]*) + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + freebsd3.[2-9]* | freebsdelf3.[2-9]* | \ + freebsd4.[0-5] | freebsdelf4.[0-5] | freebsd4.1.1 | freebsdelf4.1.1) + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + *) # from 4.6 on, and DragonFly + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + esac + ;; + +gnu*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + +haiku*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + dynamic_linker="$host_os runtime_loader" + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LIBRARY_PATH + shlibpath_overrides_runpath=yes + sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib' + hardcode_into_libs=yes + ;; + +hpux9* | 
hpux10* | hpux11*) + # Give a soname corresponding to the major version so that dld.sl refuses to + # link against other versions. + version_type=sunos + need_lib_prefix=no + need_version=no + case $host_cpu in + ia64*) + shrext_cmds='.so' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.so" + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + if test "X$HPUX_IA64_MODE" = X32; then + sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" + else + sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" + fi + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + hppa*64*) + shrext_cmds='.sl' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.sl" + shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + *) + shrext_cmds='.sl' + dynamic_linker="$host_os dld.sl" + shlibpath_var=SHLIB_PATH + shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + ;; + esac + # HP-UX runs *really* slowly unless shared libraries are mode 555, ... 
+ postinstall_cmds='chmod 555 $lib' + # or fails outright, so override atomically: + install_override_mode=555 + ;; + +interix[3-9]*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + +irix5* | irix6* | nonstopux*) + case $host_os in + nonstopux*) version_type=nonstopux ;; + *) + if test "$lt_cv_prog_gnu_ld" = yes; then + version_type=linux # correct to gnu/linux during the next big refactor + else + version_type=irix + fi ;; + esac + need_lib_prefix=no + need_version=no + soname_spec='${libname}${release}${shared_ext}$major' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} $libname${shared_ext}' + case $host_os in + irix5* | nonstopux*) + libsuff= shlibsuff= + ;; + *) + case $LD in # libtool.m4 will add one of these switches to LD + *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") + libsuff= shlibsuff= libmagic=32-bit;; + *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") + libsuff=32 shlibsuff=N32 libmagic=N32;; + *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") + libsuff=64 shlibsuff=64 libmagic=64-bit;; + *) libsuff= shlibsuff= libmagic=never-match;; + esac + ;; + esac + shlibpath_var=LD_LIBRARY${shlibsuff}_PATH + shlibpath_overrides_runpath=no + sys_lib_search_path_spec="/usr/lib${libsuff} /lib${libsuff} /usr/local/lib${libsuff}" + sys_lib_dlsearch_path_spec="/usr/lib${libsuff} /lib${libsuff}" + hardcode_into_libs=yes + ;; + +# No shared lib support for Linux oldld, aout, or coff. +linux*oldld* | linux*aout* | linux*coff*) + dynamic_linker=no + ;; + +# This must be glibc/ELF. 
+linux* | k*bsd*-gnu | kopensolaris*-gnu) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + + # Some binutils ld are patched to set DT_RUNPATH + if ${lt_cv_shlibpath_overrides_runpath+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_shlibpath_overrides_runpath=no + save_LDFLAGS=$LDFLAGS + save_libdir=$libdir + eval "libdir=/foo; wl=\"$lt_prog_compiler_wl\"; \ + LDFLAGS=\"\$LDFLAGS $hardcode_libdir_flag_spec\"" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + if ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null; then : + lt_cv_shlibpath_overrides_runpath=yes +fi +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + LDFLAGS=$save_LDFLAGS + libdir=$save_libdir + +fi + + shlibpath_overrides_runpath=$lt_cv_shlibpath_overrides_runpath + + # This implies no fast_install, which is unacceptable. + # Some rework will be needed to allow for fast_install + # before this can be enabled. + hardcode_into_libs=yes + + # Add ABI-specific directories to the system library path. 
+ sys_lib_dlsearch_path_spec="/lib64 /usr/lib64 /lib /usr/lib" + + # Append ld.so.conf contents to the search path + if test -f /etc/ld.so.conf; then + lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;s/"//g;/^$/d' | tr '\n' ' '` + sys_lib_dlsearch_path_spec="$sys_lib_dlsearch_path_spec $lt_ld_extra" + + fi + + # We used to test for /lib/ld.so.1 and disable shared libraries on + # powerpc, because MkLinux only supported shared libraries with the + # GNU dynamic linker. Since this was broken with cross compilers, + # most powerpc-linux boxes support dynamic linking these days and + # people can always --disable-shared, the test was removed, and we + # assume the GNU/Linux dynamic linker is in use. + dynamic_linker='GNU/Linux ld.so' + ;; + +netbsd*) + version_type=sunos + need_lib_prefix=no + need_version=no + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + dynamic_linker='NetBSD (a.out) ld.so' + else + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + dynamic_linker='NetBSD ld.elf_so' + fi + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + +newsos6) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + ;; + +*nto* | *qnx*) + version_type=qnx + need_lib_prefix=no + need_version=no + 
library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='ldqnx.so' + ;; + +openbsd*) + version_type=sunos + sys_lib_dlsearch_path_spec="/usr/lib" + need_lib_prefix=no + # Some older versions of OpenBSD (3.3 at least) *do* need versioned libs. + case $host_os in + openbsd3.3 | openbsd3.3.*) need_version=yes ;; + *) need_version=no ;; + esac + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + shlibpath_var=LD_LIBRARY_PATH + if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + case $host_os in + openbsd2.[89] | openbsd2.[89].*) + shlibpath_overrides_runpath=no + ;; + *) + shlibpath_overrides_runpath=yes + ;; + esac + else + shlibpath_overrides_runpath=yes + fi + ;; + +os2*) + libname_spec='$name' + shrext_cmds=".dll" + need_lib_prefix=no + library_names_spec='$libname${shared_ext} $libname.a' + dynamic_linker='OS/2 ld.exe' + shlibpath_var=LIBPATH + ;; + +osf3* | osf4* | osf5*) + version_type=osf + need_lib_prefix=no + need_version=no + soname_spec='${libname}${release}${shared_ext}$major' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" + sys_lib_dlsearch_path_spec="$sys_lib_search_path_spec" + ;; + +rdos*) + dynamic_linker=no + ;; + +solaris*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major 
$libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + # ldd complains unless libraries are executable + postinstall_cmds='chmod +x $lib' + ;; + +sunos4*) + version_type=sunos + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + if test "$with_gnu_ld" = yes; then + need_lib_prefix=no + fi + need_version=yes + ;; + +sysv4 | sysv4.3*) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + case $host_vendor in + sni) + shlibpath_overrides_runpath=no + need_lib_prefix=no + runpath_var=LD_RUN_PATH + ;; + siemens) + need_lib_prefix=no + ;; + motorola) + need_lib_prefix=no + need_version=no + shlibpath_overrides_runpath=no + sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' + ;; + esac + ;; + +sysv4*MP*) + if test -d /usr/nec ;then + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='$libname${shared_ext}.$versuffix $libname${shared_ext}.$major $libname${shared_ext}' + soname_spec='$libname${shared_ext}.$major' + shlibpath_var=LD_LIBRARY_PATH + fi + ;; + +sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) + version_type=freebsd-elf + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + if test "$with_gnu_ld" = yes; then + 
sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib' + else + sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' + case $host_os in + sco3.2v5*) + sys_lib_search_path_spec="$sys_lib_search_path_spec /lib" + ;; + esac + fi + sys_lib_dlsearch_path_spec='/usr/lib' + ;; + +tpf*) + # TPF is a cross-target only. Preferred cross-host = GNU/Linux. + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + +uts4*) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +*) + dynamic_linker=no + ;; +esac +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $dynamic_linker" >&5 +$as_echo "$dynamic_linker" >&6; } +test "$dynamic_linker" = no && can_build_shared=no + +variables_saved_for_relink="PATH $shlibpath_var $runpath_var" +if test "$GCC" = yes; then + variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" +fi + +if test "${lt_cv_sys_lib_search_path_spec+set}" = set; then + sys_lib_search_path_spec="$lt_cv_sys_lib_search_path_spec" +fi +if test "${lt_cv_sys_lib_dlsearch_path_spec+set}" = set; then + sys_lib_dlsearch_path_spec="$lt_cv_sys_lib_dlsearch_path_spec" +fi + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to hardcode library paths into programs" >&5 +$as_echo_n "checking how to hardcode library 
paths into programs... " >&6; } +hardcode_action= +if test -n "$hardcode_libdir_flag_spec" || + test -n "$runpath_var" || + test "X$hardcode_automatic" = "Xyes" ; then + + # We can hardcode non-existent directories. + if test "$hardcode_direct" != no && + # If the only mechanism to avoid hardcoding is shlibpath_var, we + # have to relink, otherwise we might link with an installed library + # when we should be linking with a yet-to-be-installed one + ## test "$_LT_TAGVAR(hardcode_shlibpath_var, )" != no && + test "$hardcode_minus_L" != no; then + # Linking always hardcodes the temporary library directory. + hardcode_action=relink + else + # We can link without hardcoding, and we can hardcode nonexisting dirs. + hardcode_action=immediate + fi +else + # We cannot hardcode anything, or else we can only hardcode existing + # directories. + hardcode_action=unsupported +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $hardcode_action" >&5 +$as_echo "$hardcode_action" >&6; } + +if test "$hardcode_action" = relink || + test "$inherit_rpath" = yes; then + # Fast installation is not supported + enable_fast_install=no +elif test "$shlibpath_overrides_runpath" = yes || + test "$enable_shared" = no; then + # Fast installation is not necessary + enable_fast_install=needless +fi + + + + + + + if test "x$enable_dlopen" != xyes; then + enable_dlopen=unknown + enable_dlopen_self=unknown + enable_dlopen_self_static=unknown +else + lt_cv_dlopen=no + lt_cv_dlopen_libs= + + case $host_os in + beos*) + lt_cv_dlopen="load_add_on" + lt_cv_dlopen_libs= + lt_cv_dlopen_self=yes + ;; + + mingw* | pw32* | cegcc*) + lt_cv_dlopen="LoadLibrary" + lt_cv_dlopen_libs= + ;; + + cygwin*) + lt_cv_dlopen="dlopen" + lt_cv_dlopen_libs= + ;; + + darwin*) + # if libdl is installed we need to link against it + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -ldl" >&5 +$as_echo_n "checking for dlopen in -ldl... 
" >&6; } +if ${ac_cv_lib_dl_dlopen+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-ldl $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char dlopen (); +int +main () +{ +return dlopen (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_dl_dlopen=yes +else + ac_cv_lib_dl_dlopen=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlopen" >&5 +$as_echo "$ac_cv_lib_dl_dlopen" >&6; } +if test "x$ac_cv_lib_dl_dlopen" = xyes; then : + lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl" +else + + lt_cv_dlopen="dyld" + lt_cv_dlopen_libs= + lt_cv_dlopen_self=yes + +fi + + ;; + + *) + ac_fn_c_check_func "$LINENO" "shl_load" "ac_cv_func_shl_load" +if test "x$ac_cv_func_shl_load" = xyes; then : + lt_cv_dlopen="shl_load" +else + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for shl_load in -ldld" >&5 +$as_echo_n "checking for shl_load in -ldld... " >&6; } +if ${ac_cv_lib_dld_shl_load+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-ldld $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char shl_load (); +int +main () +{ +return shl_load (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_dld_shl_load=yes +else + ac_cv_lib_dld_shl_load=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dld_shl_load" >&5 +$as_echo "$ac_cv_lib_dld_shl_load" >&6; } +if test "x$ac_cv_lib_dld_shl_load" = xyes; then : + lt_cv_dlopen="shl_load" lt_cv_dlopen_libs="-ldld" +else + ac_fn_c_check_func "$LINENO" "dlopen" "ac_cv_func_dlopen" +if test "x$ac_cv_func_dlopen" = xyes; then : + lt_cv_dlopen="dlopen" +else + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -ldl" >&5 +$as_echo_n "checking for dlopen in -ldl... " >&6; } +if ${ac_cv_lib_dl_dlopen+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-ldl $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char dlopen (); +int +main () +{ +return dlopen (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_dl_dlopen=yes +else + ac_cv_lib_dl_dlopen=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlopen" >&5 +$as_echo "$ac_cv_lib_dl_dlopen" >&6; } +if test "x$ac_cv_lib_dl_dlopen" = xyes; then : + lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl" +else + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -lsvld" >&5 +$as_echo_n "checking for dlopen in -lsvld... 
" >&6; } +if ${ac_cv_lib_svld_dlopen+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lsvld $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char dlopen (); +int +main () +{ +return dlopen (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_svld_dlopen=yes +else + ac_cv_lib_svld_dlopen=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_svld_dlopen" >&5 +$as_echo "$ac_cv_lib_svld_dlopen" >&6; } +if test "x$ac_cv_lib_svld_dlopen" = xyes; then : + lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-lsvld" +else + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dld_link in -ldld" >&5 +$as_echo_n "checking for dld_link in -ldld... " >&6; } +if ${ac_cv_lib_dld_dld_link+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-ldld $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char dld_link (); +int +main () +{ +return dld_link (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_dld_dld_link=yes +else + ac_cv_lib_dld_dld_link=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dld_dld_link" >&5 +$as_echo "$ac_cv_lib_dld_dld_link" >&6; } +if test "x$ac_cv_lib_dld_dld_link" = xyes; then : + lt_cv_dlopen="dld_link" lt_cv_dlopen_libs="-ldld" +fi + + +fi + + +fi + + +fi + + +fi + + +fi + + ;; + esac + + if test "x$lt_cv_dlopen" != xno; then + enable_dlopen=yes + else + enable_dlopen=no + fi + + case $lt_cv_dlopen in + dlopen) + save_CPPFLAGS="$CPPFLAGS" + test "x$ac_cv_header_dlfcn_h" = xyes && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H" + + save_LDFLAGS="$LDFLAGS" + wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\" + + save_LIBS="$LIBS" + LIBS="$lt_cv_dlopen_libs $LIBS" + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a program can dlopen itself" >&5 +$as_echo_n "checking whether a program can dlopen itself... " >&6; } +if ${lt_cv_dlopen_self+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test "$cross_compiling" = yes; then : + lt_cv_dlopen_self=cross +else + lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 + lt_status=$lt_dlunknown + cat > conftest.$ac_ext <<_LT_EOF +#line $LINENO "configure" +#include "confdefs.h" + +#if HAVE_DLFCN_H +#include +#endif + +#include + +#ifdef RTLD_GLOBAL +# define LT_DLGLOBAL RTLD_GLOBAL +#else +# ifdef DL_GLOBAL +# define LT_DLGLOBAL DL_GLOBAL +# else +# define LT_DLGLOBAL 0 +# endif +#endif + +/* We may have to define LT_DLLAZY_OR_NOW in the command line if we + find out it does not work in some platform. 
*/ +#ifndef LT_DLLAZY_OR_NOW +# ifdef RTLD_LAZY +# define LT_DLLAZY_OR_NOW RTLD_LAZY +# else +# ifdef DL_LAZY +# define LT_DLLAZY_OR_NOW DL_LAZY +# else +# ifdef RTLD_NOW +# define LT_DLLAZY_OR_NOW RTLD_NOW +# else +# ifdef DL_NOW +# define LT_DLLAZY_OR_NOW DL_NOW +# else +# define LT_DLLAZY_OR_NOW 0 +# endif +# endif +# endif +# endif +#endif + +/* When -fvisbility=hidden is used, assume the code has been annotated + correspondingly for the symbols needed. */ +#if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) +int fnord () __attribute__((visibility("default"))); +#endif + +int fnord () { return 42; } +int main () +{ + void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); + int status = $lt_dlunknown; + + if (self) + { + if (dlsym (self,"fnord")) status = $lt_dlno_uscore; + else + { + if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; + else puts (dlerror ()); + } + /* dlclose (self); */ + } + else + puts (dlerror ()); + + return status; +} +_LT_EOF + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 + (eval $ac_link) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && test -s conftest${ac_exeext} 2>/dev/null; then + (./conftest; exit; ) >&5 2>/dev/null + lt_status=$? + case x$lt_status in + x$lt_dlno_uscore) lt_cv_dlopen_self=yes ;; + x$lt_dlneed_uscore) lt_cv_dlopen_self=yes ;; + x$lt_dlunknown|x*) lt_cv_dlopen_self=no ;; + esac + else : + # compilation failed + lt_cv_dlopen_self=no + fi +fi +rm -fr conftest* + + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_dlopen_self" >&5 +$as_echo "$lt_cv_dlopen_self" >&6; } + + if test "x$lt_cv_dlopen_self" = xyes; then + wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $lt_prog_compiler_static\" + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a statically linked program can dlopen itself" >&5 +$as_echo_n "checking whether a statically linked program can dlopen itself... 
" >&6; } +if ${lt_cv_dlopen_self_static+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test "$cross_compiling" = yes; then : + lt_cv_dlopen_self_static=cross +else + lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 + lt_status=$lt_dlunknown + cat > conftest.$ac_ext <<_LT_EOF +#line $LINENO "configure" +#include "confdefs.h" + +#if HAVE_DLFCN_H +#include +#endif + +#include + +#ifdef RTLD_GLOBAL +# define LT_DLGLOBAL RTLD_GLOBAL +#else +# ifdef DL_GLOBAL +# define LT_DLGLOBAL DL_GLOBAL +# else +# define LT_DLGLOBAL 0 +# endif +#endif + +/* We may have to define LT_DLLAZY_OR_NOW in the command line if we + find out it does not work in some platform. */ +#ifndef LT_DLLAZY_OR_NOW +# ifdef RTLD_LAZY +# define LT_DLLAZY_OR_NOW RTLD_LAZY +# else +# ifdef DL_LAZY +# define LT_DLLAZY_OR_NOW DL_LAZY +# else +# ifdef RTLD_NOW +# define LT_DLLAZY_OR_NOW RTLD_NOW +# else +# ifdef DL_NOW +# define LT_DLLAZY_OR_NOW DL_NOW +# else +# define LT_DLLAZY_OR_NOW 0 +# endif +# endif +# endif +# endif +#endif + +/* When -fvisbility=hidden is used, assume the code has been annotated + correspondingly for the symbols needed. */ +#if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) +int fnord () __attribute__((visibility("default"))); +#endif + +int fnord () { return 42; } +int main () +{ + void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); + int status = $lt_dlunknown; + + if (self) + { + if (dlsym (self,"fnord")) status = $lt_dlno_uscore; + else + { + if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; + else puts (dlerror ()); + } + /* dlclose (self); */ + } + else + puts (dlerror ()); + + return status; +} +_LT_EOF + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 + (eval $ac_link) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && test -s conftest${ac_exeext} 2>/dev/null; then + (./conftest; exit; ) >&5 2>/dev/null + lt_status=$? 
+ case x$lt_status in + x$lt_dlno_uscore) lt_cv_dlopen_self_static=yes ;; + x$lt_dlneed_uscore) lt_cv_dlopen_self_static=yes ;; + x$lt_dlunknown|x*) lt_cv_dlopen_self_static=no ;; + esac + else : + # compilation failed + lt_cv_dlopen_self_static=no + fi +fi +rm -fr conftest* + + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_dlopen_self_static" >&5 +$as_echo "$lt_cv_dlopen_self_static" >&6; } + fi + + CPPFLAGS="$save_CPPFLAGS" + LDFLAGS="$save_LDFLAGS" + LIBS="$save_LIBS" + ;; + esac + + case $lt_cv_dlopen_self in + yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;; + *) enable_dlopen_self=unknown ;; + esac + + case $lt_cv_dlopen_self_static in + yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;; + *) enable_dlopen_self_static=unknown ;; + esac +fi + + + + + + + + + + + + + + + + + +striplib= +old_striplib= +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether stripping libraries is possible" >&5 +$as_echo_n "checking whether stripping libraries is possible... " >&6; } +if test -n "$STRIP" && $STRIP -V 2>&1 | $GREP "GNU strip" >/dev/null; then + test -z "$old_striplib" && old_striplib="$STRIP --strip-debug" + test -z "$striplib" && striplib="$STRIP --strip-unneeded" + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else +# FIXME - insert some real tests, host_os isn't really good enough + case $host_os in + darwin*) + if test -n "$STRIP" ; then + striplib="$STRIP -x" + old_striplib="$STRIP -S" + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + fi + ;; + *) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + ;; + esac +fi + + + + + + + + + + + + + # Report which library types will actually be built + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if libtool supports shared libraries" >&5 +$as_echo_n "checking if libtool supports shared libraries... 
" >&6; } + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $can_build_shared" >&5 +$as_echo "$can_build_shared" >&6; } + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to build shared libraries" >&5 +$as_echo_n "checking whether to build shared libraries... " >&6; } + test "$can_build_shared" = "no" && enable_shared=no + + # On AIX, shared libraries and static libraries use the same namespace, and + # are all built from PIC. + case $host_os in + aix3*) + test "$enable_shared" = yes && enable_static=no + if test -n "$RANLIB"; then + archive_cmds="$archive_cmds~\$RANLIB \$lib" + postinstall_cmds='$RANLIB $lib' + fi + ;; + + aix[4-9]*) + if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then + test "$enable_shared" = yes && enable_static=no + fi + ;; + esac + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_shared" >&5 +$as_echo "$enable_shared" >&6; } + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to build static libraries" >&5 +$as_echo_n "checking whether to build static libraries... " >&6; } + # Make sure either enable_shared or enable_static is yes. 
+ test "$enable_shared" = yes || enable_static=yes + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_static" >&5 +$as_echo "$enable_static" >&6; } + + + + +fi +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +CC="$lt_save_CC" + + if test -n "$CXX" && ( test "X$CXX" != "Xno" && + ( (test "X$CXX" = "Xg++" && `g++ -v >/dev/null 2>&1` ) || + (test "X$CXX" != "Xg++"))) ; then + ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C++ preprocessor" >&5 +$as_echo_n "checking how to run the C++ preprocessor... " >&6; } +if test -z "$CXXCPP"; then + if ${ac_cv_prog_CXXCPP+:} false; then : + $as_echo_n "(cached) " >&6 +else + # Double quotes because CXXCPP needs to be expanded + for CXXCPP in "$CXX -E" "/lib/cpp" + do + ac_preproc_ok=false +for ac_cxx_preproc_warn_flag in '' yes +do + # Use a header file that comes with gcc, so configuring glibc + # with a fresh cross-compiler works. + # Prefer to if __STDC__ is defined, since + # exists even on freestanding compilers. + # On the NeXT, cc -E runs the code through the compiler's parser, + # not just through cpp. "Syntax error" is here to catch this case. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#ifdef __STDC__ +# include +#else +# include +#endif + Syntax error +_ACEOF +if ac_fn_cxx_try_cpp "$LINENO"; then : + +else + # Broken: fails on valid input. +continue +fi +rm -f conftest.err conftest.i conftest.$ac_ext + + # OK, works on sane cases. Now check whether nonexistent headers + # can be detected and how. 
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +_ACEOF +if ac_fn_cxx_try_cpp "$LINENO"; then : + # Broken: success on invalid input. +continue +else + # Passes both tests. +ac_preproc_ok=: +break +fi +rm -f conftest.err conftest.i conftest.$ac_ext + +done +# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. +rm -f conftest.i conftest.err conftest.$ac_ext +if $ac_preproc_ok; then : + break +fi + + done + ac_cv_prog_CXXCPP=$CXXCPP + +fi + CXXCPP=$ac_cv_prog_CXXCPP +else + ac_cv_prog_CXXCPP=$CXXCPP +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $CXXCPP" >&5 +$as_echo "$CXXCPP" >&6; } +ac_preproc_ok=false +for ac_cxx_preproc_warn_flag in '' yes +do + # Use a header file that comes with gcc, so configuring glibc + # with a fresh cross-compiler works. + # Prefer to if __STDC__ is defined, since + # exists even on freestanding compilers. + # On the NeXT, cc -E runs the code through the compiler's parser, + # not just through cpp. "Syntax error" is here to catch this case. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#ifdef __STDC__ +# include +#else +# include +#endif + Syntax error +_ACEOF +if ac_fn_cxx_try_cpp "$LINENO"; then : + +else + # Broken: fails on valid input. +continue +fi +rm -f conftest.err conftest.i conftest.$ac_ext + + # OK, works on sane cases. Now check whether nonexistent headers + # can be detected and how. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +_ACEOF +if ac_fn_cxx_try_cpp "$LINENO"; then : + # Broken: success on invalid input. +continue +else + # Passes both tests. +ac_preproc_ok=: +break +fi +rm -f conftest.err conftest.i conftest.$ac_ext + +done +# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. 
+rm -f conftest.i conftest.err conftest.$ac_ext +if $ac_preproc_ok; then : + +else + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "C++ preprocessor \"$CXXCPP\" fails sanity check +See \`config.log' for more details" "$LINENO" 5; } +fi + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +else + _lt_caught_CXX_error=yes +fi + +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + +archive_cmds_need_lc_CXX=no +allow_undefined_flag_CXX= +always_export_symbols_CXX=no +archive_expsym_cmds_CXX= +compiler_needs_object_CXX=no +export_dynamic_flag_spec_CXX= +hardcode_direct_CXX=no +hardcode_direct_absolute_CXX=no +hardcode_libdir_flag_spec_CXX= +hardcode_libdir_separator_CXX= +hardcode_minus_L_CXX=no +hardcode_shlibpath_var_CXX=unsupported +hardcode_automatic_CXX=no +inherit_rpath_CXX=no +module_cmds_CXX= +module_expsym_cmds_CXX= +link_all_deplibs_CXX=unknown +old_archive_cmds_CXX=$old_archive_cmds +reload_flag_CXX=$reload_flag +reload_cmds_CXX=$reload_cmds +no_undefined_flag_CXX= +whole_archive_flag_spec_CXX= +enable_shared_with_static_runtimes_CXX=no + +# Source file extension for C++ test sources. +ac_ext=cpp + +# Object file extension for compiled C++ test sources. +objext=o +objext_CXX=$objext + +# No sense in running all these tests if we already determined that +# the CXX compiler isn't working. Some variables (like enable_shared) +# are currently assumed to apply to all compilers on this platform, +# and will be corrupted by setting them based on a non-working compiler. 
+if test "$_lt_caught_CXX_error" != yes; then + # Code to be used in simple compile tests + lt_simple_compile_test_code="int some_variable = 0;" + + # Code to be used in simple link tests + lt_simple_link_test_code='int main(int, char *[]) { return(0); }' + + # ltmain only uses $CC for tagged configurations so make sure $CC is set. + + + + + + +# If no C compiler was specified, use CC. +LTCC=${LTCC-"$CC"} + +# If no C compiler flags were specified, use CFLAGS. +LTCFLAGS=${LTCFLAGS-"$CFLAGS"} + +# Allow CC to be a program name with arguments. +compiler=$CC + + + # save warnings/boilerplate of simple test code + ac_outfile=conftest.$ac_objext +echo "$lt_simple_compile_test_code" >conftest.$ac_ext +eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_compiler_boilerplate=`cat conftest.err` +$RM conftest* + + ac_outfile=conftest.$ac_objext +echo "$lt_simple_link_test_code" >conftest.$ac_ext +eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_linker_boilerplate=`cat conftest.err` +$RM -r conftest* + + + # Allow CC to be a program name with arguments. 
+ lt_save_CC=$CC + lt_save_CFLAGS=$CFLAGS + lt_save_LD=$LD + lt_save_GCC=$GCC + GCC=$GXX + lt_save_with_gnu_ld=$with_gnu_ld + lt_save_path_LD=$lt_cv_path_LD + if test -n "${lt_cv_prog_gnu_ldcxx+set}"; then + lt_cv_prog_gnu_ld=$lt_cv_prog_gnu_ldcxx + else + $as_unset lt_cv_prog_gnu_ld + fi + if test -n "${lt_cv_path_LDCXX+set}"; then + lt_cv_path_LD=$lt_cv_path_LDCXX + else + $as_unset lt_cv_path_LD + fi + test -z "${LDCXX+set}" || LD=$LDCXX + CC=${CXX-"c++"} + CFLAGS=$CXXFLAGS + compiler=$CC + compiler_CXX=$CC + for cc_temp in $compiler""; do + case $cc_temp in + compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; + distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; + \-*) ;; + *) break;; + esac +done +cc_basename=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"` + + + if test -n "$compiler"; then + # We don't want -fno-exception when compiling C++ code, so set the + # no_builtin_flag separately + if test "$GXX" = yes; then + lt_prog_compiler_no_builtin_flag_CXX=' -fno-builtin' + else + lt_prog_compiler_no_builtin_flag_CXX= + fi + + if test "$GXX" = yes; then + # Set up default GNU C++ configuration + + + +# Check whether --with-gnu-ld was given. +if test "${with_gnu_ld+set}" = set; then : + withval=$with_gnu_ld; test "$withval" = no || with_gnu_ld=yes +else + with_gnu_ld=no +fi + +ac_prog=ld +if test "$GCC" = yes; then + # Check if gcc -print-prog-name=ld gives a path. + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ld used by $CC" >&5 +$as_echo_n "checking for ld used by $CC... " >&6; } + case $host in + *-*-mingw*) + # gcc leaves a trailing carriage return which upsets mingw + ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; + *) + ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; + esac + case $ac_prog in + # Accept absolute paths. 
+ [\\/]* | ?:[\\/]*) + re_direlt='/[^/][^/]*/\.\./' + # Canonicalize the pathname of ld + ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'` + while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do + ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"` + done + test -z "$LD" && LD="$ac_prog" + ;; + "") + # If it fails, then pretend we aren't using GCC. + ac_prog=ld + ;; + *) + # If it is relative, then search for the first ld in PATH. + with_gnu_ld=unknown + ;; + esac +elif test "$with_gnu_ld" = yes; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU ld" >&5 +$as_echo_n "checking for GNU ld... " >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for non-GNU ld" >&5 +$as_echo_n "checking for non-GNU ld... " >&6; } +fi +if ${lt_cv_path_LD+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -z "$LD"; then + lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR + for ac_dir in $PATH; do + IFS="$lt_save_ifs" + test -z "$ac_dir" && ac_dir=. + if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then + lt_cv_path_LD="$ac_dir/$ac_prog" + # Check to see if the program is GNU ld. I'd rather use --version, + # but apparently some variants of GNU ld only accept -v. + # Break only if it was the GNU/non-GNU ld that we prefer. + case `"$lt_cv_path_LD" -v 2>&1 &5 +$as_echo "$LD" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi +test -z "$LD" && as_fn_error $? "no acceptable ld found in \$PATH" "$LINENO" 5 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if the linker ($LD) is GNU ld" >&5 +$as_echo_n "checking if the linker ($LD) is GNU ld... " >&6; } +if ${lt_cv_prog_gnu_ld+:} false; then : + $as_echo_n "(cached) " >&6 +else + # I'd rather use --version here, but apparently some GNU lds only accept -v. 
+case `$LD -v 2>&1 &5 +$as_echo "$lt_cv_prog_gnu_ld" >&6; } +with_gnu_ld=$lt_cv_prog_gnu_ld + + + + + + + + # Check if GNU C++ uses GNU ld as the underlying linker, since the + # archiving commands below assume that GNU ld is being used. + if test "$with_gnu_ld" = yes; then + archive_cmds_CXX='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds_CXX='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + + hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir' + export_dynamic_flag_spec_CXX='${wl}--export-dynamic' + + # If archive_cmds runs LD, not CC, wlarc should be empty + # XXX I think wlarc can be eliminated in ltcf-cxx, but I need to + # investigate it a little bit more. (MM) + wlarc='${wl}' + + # ancient GNU ld didn't support --whole-archive et. al. + if eval "`$CC -print-prog-name=ld` --help 2>&1" | + $GREP 'no-whole-archive' > /dev/null; then + whole_archive_flag_spec_CXX="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' + else + whole_archive_flag_spec_CXX= + fi + else + with_gnu_ld=no + wlarc= + + # A generic and very simple default shared library creation + # command for GNU C++ for the case where it uses the native + # linker, instead of GNU ld. If possible, this setting should + # overridden to take advantage of the native linker features on + # the platform it is being used on. + archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' + fi + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. 
+ output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' + + else + GXX=no + with_gnu_ld=no + wlarc= + fi + + # PORTME: fill in a description of your system's C++ link characteristics + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5 +$as_echo_n "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; } + ld_shlibs_CXX=yes + case $host_os in + aix3*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + aix[4-9]*) + if test "$host_cpu" = ia64; then + # On IA64, the linker does run time linking by default, so we don't + # have to do anything special. + aix_use_runtimelinking=no + exp_sym_flag='-Bexport' + no_entry_flag="" + else + aix_use_runtimelinking=no + + # Test if we are trying to use run time linking or normal + # AIX style linking. If -brtl is somewhere in LDFLAGS, we + # need to do runtime linking. + case $host_os in aix4.[23]|aix4.[23].*|aix[5-9]*) + for ld_flag in $LDFLAGS; do + case $ld_flag in + *-brtl*) + aix_use_runtimelinking=yes + break + ;; + esac + done + ;; + esac + + exp_sym_flag='-bexport' + no_entry_flag='-bnoentry' + fi + + # When large executables or shared objects are built, AIX ld can + # have problems creating the table of contents. If linking a library + # or program results in "error TOC overflow" add -mminimal-toc to + # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not + # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. 
+ + archive_cmds_CXX='' + hardcode_direct_CXX=yes + hardcode_direct_absolute_CXX=yes + hardcode_libdir_separator_CXX=':' + link_all_deplibs_CXX=yes + file_list_spec_CXX='${wl}-f,' + + if test "$GXX" = yes; then + case $host_os in aix4.[012]|aix4.[012].*) + # We only want to do this on AIX 4.2 and lower, the check + # below for broken collect2 doesn't work under 4.3+ + collect2name=`${CC} -print-prog-name=collect2` + if test -f "$collect2name" && + strings "$collect2name" | $GREP resolve_lib_name >/dev/null + then + # We have reworked collect2 + : + else + # We have old collect2 + hardcode_direct_CXX=unsupported + # It fails to find uninstalled libraries when the uninstalled + # path is not listed in the libpath. Setting hardcode_minus_L + # to unsupported forces relinking + hardcode_minus_L_CXX=yes + hardcode_libdir_flag_spec_CXX='-L$libdir' + hardcode_libdir_separator_CXX= + fi + esac + shared_flag='-shared' + if test "$aix_use_runtimelinking" = yes; then + shared_flag="$shared_flag "'${wl}-G' + fi + else + # not using gcc + if test "$host_cpu" = ia64; then + # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release + # chokes on -Wl,-G. The following line is correct: + shared_flag='-G' + else + if test "$aix_use_runtimelinking" = yes; then + shared_flag='${wl}-G' + else + shared_flag='${wl}-bM:SRE' + fi + fi + fi + + export_dynamic_flag_spec_CXX='${wl}-bexpall' + # It seems that -bexpall does not export symbols beginning with + # underscore (_), so it is better to generate a list of symbols to + # export. + always_export_symbols_CXX=yes + if test "$aix_use_runtimelinking" = yes; then + # Warning - without using the other runtime loading flags (-brtl), + # -berok will link without error, but may produce a broken library. + allow_undefined_flag_CXX='-berok' + # Determine the default libpath from the value encoded in an empty + # executable. 
+ if test "${lt_cv_aix_libpath+set}" = set; then + aix_libpath=$lt_cv_aix_libpath +else + if ${lt_cv_aix_libpath__CXX+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_link "$LINENO"; then : + + lt_aix_libpath_sed=' + /Import File Strings/,/^$/ { + /^0/ { + s/^0 *\([^ ]*\) *$/\1/ + p + } + }' + lt_cv_aix_libpath__CXX=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + # Check for a 64-bit object if we didn't find anything. + if test -z "$lt_cv_aix_libpath__CXX"; then + lt_cv_aix_libpath__CXX=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + fi +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + if test -z "$lt_cv_aix_libpath__CXX"; then + lt_cv_aix_libpath__CXX="/usr/lib:/lib" + fi + +fi + + aix_libpath=$lt_cv_aix_libpath__CXX +fi + + hardcode_libdir_flag_spec_CXX='${wl}-blibpath:$libdir:'"$aix_libpath" + + archive_expsym_cmds_CXX='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then func_echo_all "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" + else + if test "$host_cpu" = ia64; then + hardcode_libdir_flag_spec_CXX='${wl}-R $libdir:/usr/lib:/lib' + allow_undefined_flag_CXX="-z nodefs" + archive_expsym_cmds_CXX="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols" + else + # Determine the default libpath from the value encoded in an + # empty executable. + if test "${lt_cv_aix_libpath+set}" = set; then + aix_libpath=$lt_cv_aix_libpath +else + if ${lt_cv_aix_libpath__CXX+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_link "$LINENO"; then : + + lt_aix_libpath_sed=' + /Import File Strings/,/^$/ { + /^0/ { + s/^0 *\([^ ]*\) *$/\1/ + p + } + }' + lt_cv_aix_libpath__CXX=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + # Check for a 64-bit object if we didn't find anything. + if test -z "$lt_cv_aix_libpath__CXX"; then + lt_cv_aix_libpath__CXX=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + fi +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + if test -z "$lt_cv_aix_libpath__CXX"; then + lt_cv_aix_libpath__CXX="/usr/lib:/lib" + fi + +fi + + aix_libpath=$lt_cv_aix_libpath__CXX +fi + + hardcode_libdir_flag_spec_CXX='${wl}-blibpath:$libdir:'"$aix_libpath" + # Warning - without using the other run time loading flags, + # -berok will link without error, but may produce a broken library. + no_undefined_flag_CXX=' ${wl}-bernotok' + allow_undefined_flag_CXX=' ${wl}-berok' + if test "$with_gnu_ld" = yes; then + # We only use this code for GNU lds that support --whole-archive. + whole_archive_flag_spec_CXX='${wl}--whole-archive$convenience ${wl}--no-whole-archive' + else + # Exported symbols can be pulled into shared objects from archives + whole_archive_flag_spec_CXX='$convenience' + fi + archive_cmds_need_lc_CXX=yes + # This is similar to how AIX traditionally builds its shared + # libraries. + archive_expsym_cmds_CXX="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' + fi + fi + ;; + + beos*) + if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + allow_undefined_flag_CXX=unsupported + # Joseph Beckenbach says some releases of gcc + # support --undefined. This deserves some investigation. 
FIXME + archive_cmds_CXX='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + else + ld_shlibs_CXX=no + fi + ;; + + chorus*) + case $cc_basename in + *) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + esac + ;; + + cygwin* | mingw* | pw32* | cegcc*) + case $GXX,$cc_basename in + ,cl* | no,cl*) + # Native MSVC + # hardcode_libdir_flag_spec is actually meaningless, as there is + # no search path for DLLs. + hardcode_libdir_flag_spec_CXX=' ' + allow_undefined_flag_CXX=unsupported + always_export_symbols_CXX=yes + file_list_spec_CXX='@' + # Tell ltmain to make .lib files, not .a files. + libext=lib + # Tell ltmain to make .dll files, not .so files. + shrext_cmds=".dll" + # FIXME: Setting linknames here is a bad hack. + archive_cmds_CXX='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-dll~linknames=' + archive_expsym_cmds_CXX='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then + $SED -n -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' -e '1\\\!p' < $export_symbols > $output_objdir/$soname.exp; + else + $SED -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' < $export_symbols > $output_objdir/$soname.exp; + fi~ + $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ + linknames=' + # The linker will not automatically build a static lib if we build a DLL. 
+ # _LT_TAGVAR(old_archive_from_new_cmds, CXX)='true' + enable_shared_with_static_runtimes_CXX=yes + # Don't use ranlib + old_postinstall_cmds_CXX='chmod 644 $oldlib' + postlink_cmds_CXX='lt_outputfile="@OUTPUT@"~ + lt_tool_outputfile="@TOOL_OUTPUT@"~ + case $lt_outputfile in + *.exe|*.EXE) ;; + *) + lt_outputfile="$lt_outputfile.exe" + lt_tool_outputfile="$lt_tool_outputfile.exe" + ;; + esac~ + func_to_tool_file "$lt_outputfile"~ + if test "$MANIFEST_TOOL" != ":" && test -f "$lt_outputfile.manifest"; then + $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; + $RM "$lt_outputfile.manifest"; + fi' + ;; + *) + # g++ + # _LT_TAGVAR(hardcode_libdir_flag_spec, CXX) is actually meaningless, + # as there is no search path for DLLs. + hardcode_libdir_flag_spec_CXX='-L$libdir' + export_dynamic_flag_spec_CXX='${wl}--export-all-symbols' + allow_undefined_flag_CXX=unsupported + always_export_symbols_CXX=no + enable_shared_with_static_runtimes_CXX=yes + + if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then + archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + # If the export-symbols file already is a .def file (1st line + # is EXPORTS), use it as is; otherwise, prepend... 
+ archive_expsym_cmds_CXX='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then + cp $export_symbols $output_objdir/$soname.def; + else + echo EXPORTS > $output_objdir/$soname.def; + cat $export_symbols >> $output_objdir/$soname.def; + fi~ + $CC -shared -nostdlib $output_objdir/$soname.def $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + else + ld_shlibs_CXX=no + fi + ;; + esac + ;; + darwin* | rhapsody*) + + + archive_cmds_need_lc_CXX=no + hardcode_direct_CXX=no + hardcode_automatic_CXX=yes + hardcode_shlibpath_var_CXX=unsupported + if test "$lt_cv_ld_force_load" = "yes"; then + whole_archive_flag_spec_CXX='`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience ${wl}-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`' + + else + whole_archive_flag_spec_CXX='' + fi + link_all_deplibs_CXX=yes + allow_undefined_flag_CXX="$_lt_dar_allow_undefined" + case $cc_basename in + ifort*) _lt_dar_can_shared=yes ;; + *) _lt_dar_can_shared=$GCC ;; + esac + if test "$_lt_dar_can_shared" = "yes"; then + output_verbose_link_cmd=func_echo_all + archive_cmds_CXX="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod${_lt_dsymutil}" + module_cmds_CXX="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dsymutil}" + archive_expsym_cmds_CXX="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring ${_lt_dar_single_mod}${_lt_dar_export_syms}${_lt_dsymutil}" + module_expsym_cmds_CXX="sed -e 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs 
\$compiler_flags${_lt_dar_export_syms}${_lt_dsymutil}" + if test "$lt_cv_apple_cc_single_mod" != "yes"; then + archive_cmds_CXX="\$CC -r -keep_private_externs -nostdlib -o \${lib}-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \${lib}-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring${_lt_dsymutil}" + archive_expsym_cmds_CXX="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -r -keep_private_externs -nostdlib -o \${lib}-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \${lib}-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring${_lt_dar_export_syms}${_lt_dsymutil}" + fi + + else + ld_shlibs_CXX=no + fi + + ;; + + dgux*) + case $cc_basename in + ec++*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + ghcx*) + # Green Hills C++ Compiler + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + *) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + esac + ;; + + freebsd2.*) + # C++ shared libraries reported to be fairly broken before + # switch to ELF + ld_shlibs_CXX=no + ;; + + freebsd-elf*) + archive_cmds_need_lc_CXX=no + ;; + + freebsd* | dragonfly*) + # FreeBSD 3 and later use GNU C++ and GNU ld with standard ELF + # conventions + ld_shlibs_CXX=yes + ;; + + gnu*) + ;; + + haiku*) + archive_cmds_CXX='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + link_all_deplibs_CXX=yes + ;; + + hpux9*) + hardcode_libdir_flag_spec_CXX='${wl}+b ${wl}$libdir' + hardcode_libdir_separator_CXX=: + export_dynamic_flag_spec_CXX='${wl}-E' + hardcode_direct_CXX=yes + hardcode_minus_L_CXX=yes # Not in the search PATH, + # but as the default + # location of the library. 
+ + case $cc_basename in + CC*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + aCC*) + archive_cmds_CXX='$RM $output_objdir/$soname~$CC -b ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. + output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $EGREP "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' + ;; + *) + if test "$GXX" = yes; then + archive_cmds_CXX='$RM $output_objdir/$soname~$CC -shared -nostdlib $pic_flag ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + else + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + fi + ;; + esac + ;; + + hpux10*|hpux11*) + if test $with_gnu_ld = no; then + hardcode_libdir_flag_spec_CXX='${wl}+b ${wl}$libdir' + hardcode_libdir_separator_CXX=: + + case $host_cpu in + hppa*64*|ia64*) + ;; + *) + export_dynamic_flag_spec_CXX='${wl}-E' + ;; + esac + fi + case $host_cpu in + hppa*64*|ia64*) + hardcode_direct_CXX=no + hardcode_shlibpath_var_CXX=no + ;; + *) + hardcode_direct_CXX=yes + hardcode_direct_absolute_CXX=yes + hardcode_minus_L_CXX=yes # Not in the search PATH, + # but as the default + # location of the library. 
+ ;; + esac + + case $cc_basename in + CC*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + aCC*) + case $host_cpu in + hppa*64*) + archive_cmds_CXX='$CC -b ${wl}+h ${wl}$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + ia64*) + archive_cmds_CXX='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + *) + archive_cmds_CXX='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + esac + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. + output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $GREP "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' + ;; + *) + if test "$GXX" = yes; then + if test $with_gnu_ld = no; then + case $host_cpu in + hppa*64*) + archive_cmds_CXX='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + ia64*) + archive_cmds_CXX='$CC -shared -nostdlib $pic_flag ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + *) + archive_cmds_CXX='$CC -shared -nostdlib $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + esac + fi + else + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + fi + ;; + esac + ;; + + interix[3-9]*) + 
hardcode_direct_CXX=no + hardcode_shlibpath_var_CXX=no + hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir' + export_dynamic_flag_spec_CXX='${wl}-E' + # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. + # Instead, shared libraries are loaded at an image base (0x10000000 by + # default) and relocated if they conflict, which is a slow very memory + # consuming and fragmenting process. To avoid this, we pick a random, + # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link + # time. Moving up from 0x10000000 also allows more sbrk(2) space. + archive_cmds_CXX='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + archive_expsym_cmds_CXX='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + ;; + irix5* | irix6*) + case $cc_basename in + CC*) + # SGI C++ + archive_cmds_CXX='$CC -shared -all -multigot $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' + + # Archives containing C++ object files must be created using + # "CC -ar", where "CC" is the IRIX C++ compiler. This is + # necessary to make sure instantiated templates are included + # in the archive. 
+ old_archive_cmds_CXX='$CC -ar -WR,-u -o $oldlib $oldobjs' + ;; + *) + if test "$GXX" = yes; then + if test "$with_gnu_ld" = no; then + archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + else + archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` -o $lib' + fi + fi + link_all_deplibs_CXX=yes + ;; + esac + hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator_CXX=: + inherit_rpath_CXX=yes + ;; + + linux* | k*bsd*-gnu | kopensolaris*-gnu) + case $cc_basename in + KCC*) + # Kuck and Associates, Inc. (KAI) C++ Compiler + + # KCC will only create a shared library if the output file + # ends with ".so" (or ".sl" for HP-UX), so rename the library + # to its proper name (with version) after linking. + archive_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' + archive_expsym_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib ${wl}-retain-symbols-file,$export_symbols; mv \$templib $lib' + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. 
+ # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. + output_verbose_link_cmd='templist=`$CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 | $GREP "ld"`; rm -f libconftest$shared_ext; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' + + hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir' + export_dynamic_flag_spec_CXX='${wl}--export-dynamic' + + # Archives containing C++ object files must be created using + # "CC -Bstatic", where "CC" is the KAI C++ compiler. + old_archive_cmds_CXX='$CC -Bstatic -o $oldlib $oldobjs' + ;; + icpc* | ecpc* ) + # Intel C++ + with_gnu_ld=yes + # version 8.0 and above of icpc choke on multiply defined symbols + # if we add $predep_objects and $postdep_objects, however 7.1 and + # earlier do not add the objects themselves. 
+ case `$CC -V 2>&1` in + *"Version 7."*) + archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + ;; + *) # Version 8.0 or newer + tmp_idyn= + case $host_cpu in + ia64*) tmp_idyn=' -i_dynamic';; + esac + archive_cmds_CXX='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds_CXX='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + ;; + esac + archive_cmds_need_lc_CXX=no + hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir' + export_dynamic_flag_spec_CXX='${wl}--export-dynamic' + whole_archive_flag_spec_CXX='${wl}--whole-archive$convenience ${wl}--no-whole-archive' + ;; + pgCC* | pgcpp*) + # Portland Group C++ compiler + case `$CC -V` in + *pgCC\ [1-5].* | *pgcpp\ [1-5].*) + prelink_cmds_CXX='tpldir=Template.dir~ + rm -rf $tpldir~ + $CC --prelink_objects --instantiation_dir $tpldir $objs $libobjs $compile_deplibs~ + compile_command="$compile_command `find $tpldir -name \*.o | sort | $NL2SP`"' + old_archive_cmds_CXX='tpldir=Template.dir~ + rm -rf $tpldir~ + $CC --prelink_objects --instantiation_dir $tpldir $oldobjs$old_deplibs~ + $AR $AR_FLAGS $oldlib$oldobjs$old_deplibs `find $tpldir -name \*.o | sort | $NL2SP`~ + $RANLIB $oldlib' + archive_cmds_CXX='tpldir=Template.dir~ + rm -rf $tpldir~ + $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ + $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib' + archive_expsym_cmds_CXX='tpldir=Template.dir~ + rm -rf $tpldir~ + $CC --prelink_objects 
--instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ + $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib' + ;; + *) # Version 6 and above use weak symbols + archive_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib' + archive_expsym_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib' + ;; + esac + + hardcode_libdir_flag_spec_CXX='${wl}--rpath ${wl}$libdir' + export_dynamic_flag_spec_CXX='${wl}--export-dynamic' + whole_archive_flag_spec_CXX='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' + ;; + cxx*) + # Compaq C++ + archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib ${wl}-retain-symbols-file $wl$export_symbols' + + runpath_var=LD_RUN_PATH + hardcode_libdir_flag_spec_CXX='-rpath $libdir' + hardcode_libdir_separator_CXX=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. 
+ output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld .*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "X$list" | $Xsed' + ;; + xl* | mpixl* | bgxl*) + # IBM XL 8.0 on PPC, with GNU ld + hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir' + export_dynamic_flag_spec_CXX='${wl}--export-dynamic' + archive_cmds_CXX='$CC -qmkshrobj $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + if test "x$supports_anon_versioning" = xyes; then + archive_expsym_cmds_CXX='echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + echo "local: *; };" >> $output_objdir/$libname.ver~ + $CC -qmkshrobj $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' + fi + ;; + *) + case `$CC -V 2>&1 | sed 5q` in + *Sun\ C*) + # Sun C++ 5.9 + no_undefined_flag_CXX=' -zdefs' + archive_cmds_CXX='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + archive_expsym_cmds_CXX='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-retain-symbols-file ${wl}$export_symbols' + hardcode_libdir_flag_spec_CXX='-R$libdir' + whole_archive_flag_spec_CXX='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' + compiler_needs_object_CXX=yes + + # Not sure whether something based on + # $CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 + # would be better. 
+ output_verbose_link_cmd='func_echo_all' + + # Archives containing C++ object files must be created using + # "CC -xar", where "CC" is the Sun C++ compiler. This is + # necessary to make sure instantiated templates are included + # in the archive. + old_archive_cmds_CXX='$CC -xar -o $oldlib $oldobjs' + ;; + esac + ;; + esac + ;; + + lynxos*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + + m88k*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + + mvs*) + case $cc_basename in + cxx*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + *) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + esac + ;; + + netbsd*) + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + archive_cmds_CXX='$LD -Bshareable -o $lib $predep_objects $libobjs $deplibs $postdep_objects $linker_flags' + wlarc= + hardcode_libdir_flag_spec_CXX='-R$libdir' + hardcode_direct_CXX=yes + hardcode_shlibpath_var_CXX=no + fi + # Workaround some broken pre-1.5 toolchains + output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP conftest.$objext | $SED -e "s:-lgcc -lc -lgcc::"' + ;; + + *nto* | *qnx*) + ld_shlibs_CXX=yes + ;; + + openbsd2*) + # C++ shared libraries are fairly broken + ld_shlibs_CXX=no + ;; + + openbsd*) + if test -f /usr/libexec/ld.so; then + hardcode_direct_CXX=yes + hardcode_shlibpath_var_CXX=no + hardcode_direct_absolute_CXX=yes + archive_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' + hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir' + if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + archive_expsym_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-retain-symbols-file,$export_symbols -o $lib' + export_dynamic_flag_spec_CXX='${wl}-E' + 
whole_archive_flag_spec_CXX="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' + fi + output_verbose_link_cmd=func_echo_all + else + ld_shlibs_CXX=no + fi + ;; + + osf3* | osf4* | osf5*) + case $cc_basename in + KCC*) + # Kuck and Associates, Inc. (KAI) C++ Compiler + + # KCC will only create a shared library if the output file + # ends with ".so" (or ".sl" for HP-UX), so rename the library + # to its proper name (with version) after linking. + archive_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo "$lib" | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' + + hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir' + hardcode_libdir_separator_CXX=: + + # Archives containing C++ object files must be created using + # the KAI C++ compiler. + case $host in + osf3*) old_archive_cmds_CXX='$CC -Bstatic -o $oldlib $oldobjs' ;; + *) old_archive_cmds_CXX='$CC -o $oldlib $oldobjs' ;; + esac + ;; + RCC*) + # Rational C++ 2.4.1 + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + cxx*) + case $host in + osf3*) + allow_undefined_flag_CXX=' ${wl}-expect_unresolved ${wl}\*' + archive_cmds_CXX='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $soname `test -n "$verstring" && func_echo_all "${wl}-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' + hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir' + ;; + *) + allow_undefined_flag_CXX=' -expect_unresolved \*' + archive_cmds_CXX='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' + archive_expsym_cmds_CXX='for i in `cat $export_symbols`; do printf "%s 
%s\\n" -exported_symbol "\$i" >> $lib.exp; done~ + echo "-hidden">> $lib.exp~ + $CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname ${wl}-input ${wl}$lib.exp `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib~ + $RM $lib.exp' + hardcode_libdir_flag_spec_CXX='-rpath $libdir' + ;; + esac + + hardcode_libdir_separator_CXX=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. + output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld" | $GREP -v "ld:"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' + ;; + *) + if test "$GXX" = yes && test "$with_gnu_ld" = no; then + allow_undefined_flag_CXX=' ${wl}-expect_unresolved ${wl}\*' + case $host in + osf3*) + archive_cmds_CXX='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + ;; + *) + archive_cmds_CXX='$CC -shared $pic_flag -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + ;; + esac + + 
hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator_CXX=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' + + else + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + fi + ;; + esac + ;; + + psos*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + + sunos4*) + case $cc_basename in + CC*) + # Sun C++ 4.x + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + lcc*) + # Lucid + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + *) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + esac + ;; + + solaris*) + case $cc_basename in + CC* | sunCC*) + # Sun C++ 4.2, 5.x and Centerline C++ + archive_cmds_need_lc_CXX=yes + no_undefined_flag_CXX=' -zdefs' + archive_cmds_CXX='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + archive_expsym_cmds_CXX='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -G${allow_undefined_flag} ${wl}-M ${wl}$lib.exp -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' + + hardcode_libdir_flag_spec_CXX='-R$libdir' + hardcode_shlibpath_var_CXX=no + case $host_os in + solaris2.[0-5] | solaris2.[0-5].*) ;; + *) + # The compiler driver will combine and reorder linker options, + # but understands `-z linker_flag'. + # Supported since Solaris 2.6 (maybe 2.5.1?) 
+ whole_archive_flag_spec_CXX='-z allextract$convenience -z defaultextract' + ;; + esac + link_all_deplibs_CXX=yes + + output_verbose_link_cmd='func_echo_all' + + # Archives containing C++ object files must be created using + # "CC -xar", where "CC" is the Sun C++ compiler. This is + # necessary to make sure instantiated templates are included + # in the archive. + old_archive_cmds_CXX='$CC -xar -o $oldlib $oldobjs' + ;; + gcx*) + # Green Hills C++ Compiler + archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' + + # The C++ compiler must be used to create the archive. + old_archive_cmds_CXX='$CC $LDFLAGS -archive -o $oldlib $oldobjs' + ;; + *) + # GNU C++ compiler with Solaris linker + if test "$GXX" = yes && test "$with_gnu_ld" = no; then + no_undefined_flag_CXX=' ${wl}-z ${wl}defs' + if $CC --version | $GREP -v '^2\.7' > /dev/null; then + archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' + archive_expsym_cmds_CXX='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -shared $pic_flag -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' + else + # g++ 2.7 appears to require `-G' NOT `-shared' on this + # platform. 
+ archive_cmds_CXX='$CC -G -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' + archive_expsym_cmds_CXX='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -G -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + output_verbose_link_cmd='$CC -G $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' + fi + + hardcode_libdir_flag_spec_CXX='${wl}-R $wl$libdir' + case $host_os in + solaris2.[0-5] | solaris2.[0-5].*) ;; + *) + whole_archive_flag_spec_CXX='${wl}-z ${wl}allextract$convenience ${wl}-z ${wl}defaultextract' + ;; + esac + fi + ;; + esac + ;; + + sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7* | sco3.2v5.0.[024]*) + no_undefined_flag_CXX='${wl}-z,text' + archive_cmds_need_lc_CXX=no + hardcode_shlibpath_var_CXX=no + runpath_var='LD_RUN_PATH' + + case $cc_basename in + CC*) + archive_cmds_CXX='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds_CXX='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + archive_cmds_CXX='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds_CXX='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + ;; + + sysv5* | sco3.2v5* | sco5v6*) + # Note: We can NOT use -z defs as we might desire, because we do not + # link with -lc, and that would cause any symbols used from libc to + # always be unresolved, which means just about no library would + # ever link correctly. 
If we're not using GNU ld we use -z text + # though, which does catch some bad symbols but isn't as heavy-handed + # as -z defs. + no_undefined_flag_CXX='${wl}-z,text' + allow_undefined_flag_CXX='${wl}-z,nodefs' + archive_cmds_need_lc_CXX=no + hardcode_shlibpath_var_CXX=no + hardcode_libdir_flag_spec_CXX='${wl}-R,$libdir' + hardcode_libdir_separator_CXX=':' + link_all_deplibs_CXX=yes + export_dynamic_flag_spec_CXX='${wl}-Bexport' + runpath_var='LD_RUN_PATH' + + case $cc_basename in + CC*) + archive_cmds_CXX='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds_CXX='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + old_archive_cmds_CXX='$CC -Tprelink_objects $oldobjs~ + '"$old_archive_cmds_CXX" + reload_cmds_CXX='$CC -Tprelink_objects $reload_objs~ + '"$reload_cmds_CXX" + ;; + *) + archive_cmds_CXX='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds_CXX='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + ;; + + tandem*) + case $cc_basename in + NCC*) + # NonStop-UX NCC 3.20 + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + *) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + esac + ;; + + vxworks*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + + *) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + esac + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs_CXX" >&5 +$as_echo "$ld_shlibs_CXX" >&6; } + test "$ld_shlibs_CXX" = no && can_build_shared=no + + GCC_CXX="$GXX" + LD_CXX="$LD" + + ## CAVEAT EMPTOR: + ## There is no encapsulation within the following macros, do not change + ## the running order or otherwise move them around unless you know exactly + ## what you are doing... 
+ # Dependencies to place before and after the object being linked: +predep_objects_CXX= +postdep_objects_CXX= +predeps_CXX= +postdeps_CXX= +compiler_lib_search_path_CXX= + +cat > conftest.$ac_ext <<_LT_EOF +class Foo +{ +public: + Foo (void) { a = 0; } +private: + int a; +}; +_LT_EOF + + +_lt_libdeps_save_CFLAGS=$CFLAGS +case "$CC $CFLAGS " in #( +*\ -flto*\ *) CFLAGS="$CFLAGS -fno-lto" ;; +*\ -fwhopr*\ *) CFLAGS="$CFLAGS -fno-whopr" ;; +*\ -fuse-linker-plugin*\ *) CFLAGS="$CFLAGS -fno-use-linker-plugin" ;; +esac + +if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + # Parse the compiler output and extract the necessary + # objects, libraries and library flags. + + # Sentinel used to keep track of whether or not we are before + # the conftest object file. + pre_test_object_deps_done=no + + for p in `eval "$output_verbose_link_cmd"`; do + case ${prev}${p} in + + -L* | -R* | -l*) + # Some compilers place space between "-{L,R}" and the path. + # Remove the space. + if test $p = "-L" || + test $p = "-R"; then + prev=$p + continue + fi + + # Expand the sysroot to ease extracting the directories later. + if test -z "$prev"; then + case $p in + -L*) func_stripname_cnf '-L' '' "$p"; prev=-L; p=$func_stripname_result ;; + -R*) func_stripname_cnf '-R' '' "$p"; prev=-R; p=$func_stripname_result ;; + -l*) func_stripname_cnf '-l' '' "$p"; prev=-l; p=$func_stripname_result ;; + esac + fi + case $p in + =*) func_stripname_cnf '=' '' "$p"; p=$lt_sysroot$func_stripname_result ;; + esac + if test "$pre_test_object_deps_done" = no; then + case ${prev} in + -L | -R) + # Internal compiler library paths should come after those + # provided the user. The postdeps already come after the + # user supplied libs so there is no need to process them. 
+ if test -z "$compiler_lib_search_path_CXX"; then + compiler_lib_search_path_CXX="${prev}${p}" + else + compiler_lib_search_path_CXX="${compiler_lib_search_path_CXX} ${prev}${p}" + fi + ;; + # The "-l" case would never come before the object being + # linked, so don't bother handling this case. + esac + else + if test -z "$postdeps_CXX"; then + postdeps_CXX="${prev}${p}" + else + postdeps_CXX="${postdeps_CXX} ${prev}${p}" + fi + fi + prev= + ;; + + *.lto.$objext) ;; # Ignore GCC LTO objects + *.$objext) + # This assumes that the test object file only shows up + # once in the compiler output. + if test "$p" = "conftest.$objext"; then + pre_test_object_deps_done=yes + continue + fi + + if test "$pre_test_object_deps_done" = no; then + if test -z "$predep_objects_CXX"; then + predep_objects_CXX="$p" + else + predep_objects_CXX="$predep_objects_CXX $p" + fi + else + if test -z "$postdep_objects_CXX"; then + postdep_objects_CXX="$p" + else + postdep_objects_CXX="$postdep_objects_CXX $p" + fi + fi + ;; + + *) ;; # Ignore the rest. + + esac + done + + # Clean up. + rm -f a.out a.exe +else + echo "libtool.m4: error: problem compiling CXX test program" +fi + +$RM -f confest.$objext +CFLAGS=$_lt_libdeps_save_CFLAGS + +# PORTME: override above test on systems where it is broken +case $host_os in +interix[3-9]*) + # Interix 3.5 installs completely hosed .la files for C++, so rather than + # hack all around it, let's just trust "g++" to DTRT. + predep_objects_CXX= + postdep_objects_CXX= + postdeps_CXX= + ;; + +linux*) + case `$CC -V 2>&1 | sed 5q` in + *Sun\ C*) + # Sun C++ 5.9 + + # The more standards-conforming stlport4 library is + # incompatible with the Cstd library. Avoid specifying + # it if it's in CXXFLAGS. Ignore libCrun as + # -library=stlport4 depends on it. 
+ case " $CXX $CXXFLAGS " in + *" -library=stlport4 "*) + solaris_use_stlport4=yes + ;; + esac + + if test "$solaris_use_stlport4" != yes; then + postdeps_CXX='-library=Cstd -library=Crun' + fi + ;; + esac + ;; + +solaris*) + case $cc_basename in + CC* | sunCC*) + # The more standards-conforming stlport4 library is + # incompatible with the Cstd library. Avoid specifying + # it if it's in CXXFLAGS. Ignore libCrun as + # -library=stlport4 depends on it. + case " $CXX $CXXFLAGS " in + *" -library=stlport4 "*) + solaris_use_stlport4=yes + ;; + esac + + # Adding this requires a known-good setup of shared libraries for + # Sun compiler versions before 5.6, else PIC objects from an old + # archive will be linked into the output, leading to subtle bugs. + if test "$solaris_use_stlport4" != yes; then + postdeps_CXX='-library=Cstd -library=Crun' + fi + ;; + esac + ;; +esac + + +case " $postdeps_CXX " in +*" -lc "*) archive_cmds_need_lc_CXX=no ;; +esac + compiler_lib_search_dirs_CXX= +if test -n "${compiler_lib_search_path_CXX}"; then + compiler_lib_search_dirs_CXX=`echo " ${compiler_lib_search_path_CXX}" | ${SED} -e 's! -L! !g' -e 's!^ !!'` +fi + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + lt_prog_compiler_wl_CXX= +lt_prog_compiler_pic_CXX= +lt_prog_compiler_static_CXX= + + + # C++ specific cases for pic, static, wl, etc. + if test "$GXX" = yes; then + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_static_CXX='-static' + + case $host_os in + aix*) + # All AIX code is PIC. + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + lt_prog_compiler_static_CXX='-Bstatic' + fi + ;; + + amigaos*) + case $host_cpu in + powerpc) + # see comment about AmigaOS4 .so support + lt_prog_compiler_pic_CXX='-fPIC' + ;; + m68k) + # FIXME: we need at least 68020 code to build shared libraries, but + # adding the `-m68020' flag to GCC prevents building anything better, + # like `-m68040'. 
+ lt_prog_compiler_pic_CXX='-m68020 -resident32 -malways-restore-a4' + ;; + esac + ;; + + beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) + # PIC is the default for these OSes. + ;; + mingw* | cygwin* | os2* | pw32* | cegcc*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + # Although the cygwin gcc ignores -fPIC, still need this for old-style + # (--disable-auto-import) libraries + lt_prog_compiler_pic_CXX='-DDLL_EXPORT' + ;; + darwin* | rhapsody*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + lt_prog_compiler_pic_CXX='-fno-common' + ;; + *djgpp*) + # DJGPP does not support shared libraries at all + lt_prog_compiler_pic_CXX= + ;; + haiku*) + # PIC is the default for Haiku. + # The "-static" flag exists, but is broken. + lt_prog_compiler_static_CXX= + ;; + interix[3-9]*) + # Interix 3.x gcc -fpic/-fPIC options generate broken code. + # Instead, we relocate shared libraries at runtime. + ;; + sysv4*MP*) + if test -d /usr/nec; then + lt_prog_compiler_pic_CXX=-Kconform_pic + fi + ;; + hpux*) + # PIC is the default for 64-bit PA HP-UX, but not for 32-bit + # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag + # sets the default TLS model and affects inlining. + case $host_cpu in + hppa*64*) + ;; + *) + lt_prog_compiler_pic_CXX='-fPIC' + ;; + esac + ;; + *qnx* | *nto*) + # QNX uses GNU C++, but need to define -shared option too, otherwise + # it will coredump. + lt_prog_compiler_pic_CXX='-fPIC -shared' + ;; + *) + lt_prog_compiler_pic_CXX='-fPIC' + ;; + esac + else + case $host_os in + aix[4-9]*) + # All AIX code is PIC. 
+ if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + lt_prog_compiler_static_CXX='-Bstatic' + else + lt_prog_compiler_static_CXX='-bnso -bI:/lib/syscalls.exp' + fi + ;; + chorus*) + case $cc_basename in + cxch68*) + # Green Hills C++ Compiler + # _LT_TAGVAR(lt_prog_compiler_static, CXX)="--no_auto_instantiation -u __main -u __premain -u _abort -r $COOL_DIR/lib/libOrb.a $MVME_DIR/lib/CC/libC.a $MVME_DIR/lib/classix/libcx.s.a" + ;; + esac + ;; + mingw* | cygwin* | os2* | pw32* | cegcc*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + lt_prog_compiler_pic_CXX='-DDLL_EXPORT' + ;; + dgux*) + case $cc_basename in + ec++*) + lt_prog_compiler_pic_CXX='-KPIC' + ;; + ghcx*) + # Green Hills C++ Compiler + lt_prog_compiler_pic_CXX='-pic' + ;; + *) + ;; + esac + ;; + freebsd* | dragonfly*) + # FreeBSD uses GNU C++ + ;; + hpux9* | hpux10* | hpux11*) + case $cc_basename in + CC*) + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_static_CXX='${wl}-a ${wl}archive' + if test "$host_cpu" != ia64; then + lt_prog_compiler_pic_CXX='+Z' + fi + ;; + aCC*) + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_static_CXX='${wl}-a ${wl}archive' + case $host_cpu in + hppa*64*|ia64*) + # +Z the default + ;; + *) + lt_prog_compiler_pic_CXX='+Z' + ;; + esac + ;; + *) + ;; + esac + ;; + interix*) + # This is c89, which is MS Visual C++ (no shared libs) + # Anyone wants to do a port? + ;; + irix5* | irix6* | nonstopux*) + case $cc_basename in + CC*) + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_static_CXX='-non_shared' + # CC pic flag -KPIC is the default. + ;; + *) + ;; + esac + ;; + linux* | k*bsd*-gnu | kopensolaris*-gnu) + case $cc_basename in + KCC*) + # KAI C++ Compiler + lt_prog_compiler_wl_CXX='--backend -Wl,' + lt_prog_compiler_pic_CXX='-fPIC' + ;; + ecpc* ) + # old Intel C++ for x86_64 which still supported -KPIC. 
+ lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_pic_CXX='-KPIC' + lt_prog_compiler_static_CXX='-static' + ;; + icpc* ) + # Intel C++, used to be incompatible with GCC. + # ICC 10 doesn't accept -KPIC any more. + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_pic_CXX='-fPIC' + lt_prog_compiler_static_CXX='-static' + ;; + pgCC* | pgcpp*) + # Portland Group C++ compiler + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_pic_CXX='-fpic' + lt_prog_compiler_static_CXX='-Bstatic' + ;; + cxx*) + # Compaq C++ + # Make sure the PIC flag is empty. It appears that all Alpha + # Linux and Compaq Tru64 Unix objects are PIC. + lt_prog_compiler_pic_CXX= + lt_prog_compiler_static_CXX='-non_shared' + ;; + xlc* | xlC* | bgxl[cC]* | mpixl[cC]*) + # IBM XL 8.0, 9.0 on PPC and BlueGene + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_pic_CXX='-qpic' + lt_prog_compiler_static_CXX='-qstaticlink' + ;; + *) + case `$CC -V 2>&1 | sed 5q` in + *Sun\ C*) + # Sun C++ 5.9 + lt_prog_compiler_pic_CXX='-KPIC' + lt_prog_compiler_static_CXX='-Bstatic' + lt_prog_compiler_wl_CXX='-Qoption ld ' + ;; + esac + ;; + esac + ;; + lynxos*) + ;; + m88k*) + ;; + mvs*) + case $cc_basename in + cxx*) + lt_prog_compiler_pic_CXX='-W c,exportall' + ;; + *) + ;; + esac + ;; + netbsd*) + ;; + *qnx* | *nto*) + # QNX uses GNU C++, but need to define -shared option too, otherwise + # it will coredump. + lt_prog_compiler_pic_CXX='-fPIC -shared' + ;; + osf3* | osf4* | osf5*) + case $cc_basename in + KCC*) + lt_prog_compiler_wl_CXX='--backend -Wl,' + ;; + RCC*) + # Rational C++ 2.4.1 + lt_prog_compiler_pic_CXX='-pic' + ;; + cxx*) + # Digital/Compaq C++ + lt_prog_compiler_wl_CXX='-Wl,' + # Make sure the PIC flag is empty. It appears that all Alpha + # Linux and Compaq Tru64 Unix objects are PIC. 
+ lt_prog_compiler_pic_CXX= + lt_prog_compiler_static_CXX='-non_shared' + ;; + *) + ;; + esac + ;; + psos*) + ;; + solaris*) + case $cc_basename in + CC* | sunCC*) + # Sun C++ 4.2, 5.x and Centerline C++ + lt_prog_compiler_pic_CXX='-KPIC' + lt_prog_compiler_static_CXX='-Bstatic' + lt_prog_compiler_wl_CXX='-Qoption ld ' + ;; + gcx*) + # Green Hills C++ Compiler + lt_prog_compiler_pic_CXX='-PIC' + ;; + *) + ;; + esac + ;; + sunos4*) + case $cc_basename in + CC*) + # Sun C++ 4.x + lt_prog_compiler_pic_CXX='-pic' + lt_prog_compiler_static_CXX='-Bstatic' + ;; + lcc*) + # Lucid + lt_prog_compiler_pic_CXX='-pic' + ;; + *) + ;; + esac + ;; + sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) + case $cc_basename in + CC*) + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_pic_CXX='-KPIC' + lt_prog_compiler_static_CXX='-Bstatic' + ;; + esac + ;; + tandem*) + case $cc_basename in + NCC*) + # NonStop-UX NCC 3.20 + lt_prog_compiler_pic_CXX='-KPIC' + ;; + *) + ;; + esac + ;; + vxworks*) + ;; + *) + lt_prog_compiler_can_build_shared_CXX=no + ;; + esac + fi + +case $host_os in + # For platforms which do not support PIC, -DPIC is meaningless: + *djgpp*) + lt_prog_compiler_pic_CXX= + ;; + *) + lt_prog_compiler_pic_CXX="$lt_prog_compiler_pic_CXX -DPIC" + ;; +esac + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 +$as_echo_n "checking for $compiler option to produce PIC... " >&6; } +if ${lt_cv_prog_compiler_pic_CXX+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_pic_CXX=$lt_prog_compiler_pic_CXX +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_CXX" >&5 +$as_echo "$lt_cv_prog_compiler_pic_CXX" >&6; } +lt_prog_compiler_pic_CXX=$lt_cv_prog_compiler_pic_CXX + +# +# Check to make sure the PIC flag actually works. 
+# +if test -n "$lt_prog_compiler_pic_CXX"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler PIC flag $lt_prog_compiler_pic_CXX works" >&5 +$as_echo_n "checking if $compiler PIC flag $lt_prog_compiler_pic_CXX works... " >&6; } +if ${lt_cv_prog_compiler_pic_works_CXX+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_pic_works_CXX=no + ac_outfile=conftest.$ac_objext + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + lt_compiler_flag="$lt_prog_compiler_pic_CXX -DPIC" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + # The option is referenced via a variable to avoid confusing sed. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) + (eval "$lt_compile" 2>conftest.err) + ac_status=$? + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s "$ac_outfile"; then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings other than the usual output. + $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if test ! 
-s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then + lt_cv_prog_compiler_pic_works_CXX=yes + fi + fi + $RM conftest* + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_works_CXX" >&5 +$as_echo "$lt_cv_prog_compiler_pic_works_CXX" >&6; } + +if test x"$lt_cv_prog_compiler_pic_works_CXX" = xyes; then + case $lt_prog_compiler_pic_CXX in + "" | " "*) ;; + *) lt_prog_compiler_pic_CXX=" $lt_prog_compiler_pic_CXX" ;; + esac +else + lt_prog_compiler_pic_CXX= + lt_prog_compiler_can_build_shared_CXX=no +fi + +fi + + + + + +# +# Check to make sure the static flag actually works. +# +wl=$lt_prog_compiler_wl_CXX eval lt_tmp_static_flag=\"$lt_prog_compiler_static_CXX\" +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler static flag $lt_tmp_static_flag works" >&5 +$as_echo_n "checking if $compiler static flag $lt_tmp_static_flag works... " >&6; } +if ${lt_cv_prog_compiler_static_works_CXX+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_static_works_CXX=no + save_LDFLAGS="$LDFLAGS" + LDFLAGS="$LDFLAGS $lt_tmp_static_flag" + echo "$lt_simple_link_test_code" > conftest.$ac_ext + if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then + # The linker can only warn and ignore the option if not recognized + # So say no if there are warnings + if test -s conftest.err; then + # Append any errors to the config.log. 
+ cat conftest.err 1>&5 + $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if diff conftest.exp conftest.er2 >/dev/null; then + lt_cv_prog_compiler_static_works_CXX=yes + fi + else + lt_cv_prog_compiler_static_works_CXX=yes + fi + fi + $RM -r conftest* + LDFLAGS="$save_LDFLAGS" + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_static_works_CXX" >&5 +$as_echo "$lt_cv_prog_compiler_static_works_CXX" >&6; } + +if test x"$lt_cv_prog_compiler_static_works_CXX" = xyes; then + : +else + lt_prog_compiler_static_CXX= +fi + + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 +$as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; } +if ${lt_cv_prog_compiler_c_o_CXX+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_c_o_CXX=no + $RM -r conftest 2>/dev/null + mkdir conftest + cd conftest + mkdir out + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + + lt_compiler_flag="-o out/conftest2.$ac_objext" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) + (eval "$lt_compile" 2>out/conftest.err) + ac_status=$? + cat out/conftest.err >&5 + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + if (exit $ac_status) && test -s out/conftest2.$ac_objext + then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings + $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp + $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 + if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then + lt_cv_prog_compiler_c_o_CXX=yes + fi + fi + chmod u+w . 2>&5 + $RM conftest* + # SGI C++ compiler will create directory out/ii_files/ for + # template instantiation + test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files + $RM out/* && rmdir out + cd .. + $RM -r conftest + $RM conftest* + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o_CXX" >&5 +$as_echo "$lt_cv_prog_compiler_c_o_CXX" >&6; } + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 +$as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; } +if ${lt_cv_prog_compiler_c_o_CXX+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_c_o_CXX=no + $RM -r conftest 2>/dev/null + mkdir conftest + cd conftest + mkdir out + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + + lt_compiler_flag="-o out/conftest2.$ac_objext" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) + (eval "$lt_compile" 2>out/conftest.err) + ac_status=$? + cat out/conftest.err >&5 + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + if (exit $ac_status) && test -s out/conftest2.$ac_objext + then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings + $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp + $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 + if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then + lt_cv_prog_compiler_c_o_CXX=yes + fi + fi + chmod u+w . 2>&5 + $RM conftest* + # SGI C++ compiler will create directory out/ii_files/ for + # template instantiation + test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files + $RM out/* && rmdir out + cd .. + $RM -r conftest + $RM conftest* + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o_CXX" >&5 +$as_echo "$lt_cv_prog_compiler_c_o_CXX" >&6; } + + + + +hard_links="nottested" +if test "$lt_cv_prog_compiler_c_o_CXX" = no && test "$need_locks" != no; then + # do not overwrite the value of need_locks provided by the user + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if we can lock with hard links" >&5 +$as_echo_n "checking if we can lock with hard links... 
" >&6; } + hard_links=yes + $RM conftest* + ln conftest.a conftest.b 2>/dev/null && hard_links=no + touch conftest.a + ln conftest.a conftest.b 2>&5 || hard_links=no + ln conftest.a conftest.b 2>/dev/null && hard_links=no + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $hard_links" >&5 +$as_echo "$hard_links" >&6; } + if test "$hard_links" = no; then + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&5 +$as_echo "$as_me: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&2;} + need_locks=warn + fi +else + need_locks=no +fi + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5 +$as_echo_n "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; } + + export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + exclude_expsyms_CXX='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*' + case $host_os in + aix[4-9]*) + # If we're using GNU nm, then we don't want the "-C" option. + # -C means demangle to AIX nm, but means don't demangle with GNU nm + # Also, AIX nm treats weak defined symbols like other global defined + # symbols, whereas GNU nm marks them as "W". 
+ if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then + export_symbols_cmds_CXX='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && (substr(\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' + else + export_symbols_cmds_CXX='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && (substr(\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' + fi + ;; + pw32*) + export_symbols_cmds_CXX="$ltdll_cmds" + ;; + cygwin* | mingw* | cegcc*) + case $cc_basename in + cl*) + exclude_expsyms_CXX='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*' + ;; + *) + export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols' + exclude_expsyms_CXX='[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname' + ;; + esac + ;; + *) + export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + ;; + esac + +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs_CXX" >&5 +$as_echo "$ld_shlibs_CXX" >&6; } +test "$ld_shlibs_CXX" = no && can_build_shared=no + +with_gnu_ld_CXX=$with_gnu_ld + + + + + + +# +# Do we need to explicitly link libc? +# +case "x$archive_cmds_need_lc_CXX" in +x|xyes) + # Assume -lc should be added + archive_cmds_need_lc_CXX=yes + + if test "$enable_shared" = yes && test "$GCC" = yes; then + case $archive_cmds_CXX in + *'~'*) + # FIXME: we may have to deal with multi-command sequences. + ;; + '$CC '*) + # Test whether the compiler implicitly links with -lc since on some + # systems, -lgcc has to come before -lc. If gcc already passes -lc + # to ld, don't add -lc before -lgcc. 
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether -lc should be explicitly linked in" >&5 +$as_echo_n "checking whether -lc should be explicitly linked in... " >&6; } +if ${lt_cv_archive_cmds_need_lc_CXX+:} false; then : + $as_echo_n "(cached) " >&6 +else + $RM conftest* + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } 2>conftest.err; then + soname=conftest + lib=conftest + libobjs=conftest.$ac_objext + deplibs= + wl=$lt_prog_compiler_wl_CXX + pic_flag=$lt_prog_compiler_pic_CXX + compiler_flags=-v + linker_flags=-v + verstring= + output_objdir=. + libname=conftest + lt_save_allow_undefined_flag=$allow_undefined_flag_CXX + allow_undefined_flag_CXX= + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$archive_cmds_CXX 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1\""; } >&5 + (eval $archive_cmds_CXX 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } + then + lt_cv_archive_cmds_need_lc_CXX=no + else + lt_cv_archive_cmds_need_lc_CXX=yes + fi + allow_undefined_flag_CXX=$lt_save_allow_undefined_flag + else + cat conftest.err 1>&5 + fi + $RM conftest* + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_archive_cmds_need_lc_CXX" >&5 +$as_echo "$lt_cv_archive_cmds_need_lc_CXX" >&6; } + archive_cmds_need_lc_CXX=$lt_cv_archive_cmds_need_lc_CXX + ;; + esac + fi + ;; +esac + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking dynamic linker characteristics" >&5 +$as_echo_n "checking dynamic linker characteristics... 
" >&6; } + +library_names_spec= +libname_spec='lib$name' +soname_spec= +shrext_cmds=".so" +postinstall_cmds= +postuninstall_cmds= +finish_cmds= +finish_eval= +shlibpath_var= +shlibpath_overrides_runpath=unknown +version_type=none +dynamic_linker="$host_os ld.so" +sys_lib_dlsearch_path_spec="/lib /usr/lib" +need_lib_prefix=unknown +hardcode_into_libs=no + +# when you set need_version to no, make sure it does not cause -set_version +# flags to be left without arguments +need_version=unknown + +case $host_os in +aix3*) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='${libname}${release}${shared_ext}$versuffix $libname.a' + shlibpath_var=LIBPATH + + # AIX 3 has no versioning support, so we append a major version to the name. + soname_spec='${libname}${release}${shared_ext}$major' + ;; + +aix[4-9]*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + hardcode_into_libs=yes + if test "$host_cpu" = ia64; then + # AIX 5 supports IA64 + library_names_spec='${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext}$versuffix $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + else + # With GCC up to 2.95.x, collect2 would create an import file + # for dependence libraries. The import file would start with + # the line `#! .'. This would cause the generated library to + # depend on `.', always an invalid library. This was fixed in + # development snapshots of GCC prior to 3.0. + case $host_os in + aix4 | aix4.[01] | aix4.[01].*) + if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' + echo ' yes ' + echo '#endif'; } | ${CC} -E - | $GREP yes > /dev/null; then + : + else + can_build_shared=no + fi + ;; + esac + # AIX (on Power*) has no versioning support, so currently we can not hardcode correct + # soname into executable. Probably we can add versioning support to + # collect2, so additional links can be useful in future. 
+ if test "$aix_use_runtimelinking" = yes; then + # If using run time linking (on AIX 4.2 or later) use lib.so + # instead of lib.a to let people know that these are not + # typical AIX shared libraries. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + else + # We preserve .a as extension for shared libraries through AIX4.2 + # and later when we are not doing run time linking. + library_names_spec='${libname}${release}.a $libname.a' + soname_spec='${libname}${release}${shared_ext}$major' + fi + shlibpath_var=LIBPATH + fi + ;; + +amigaos*) + case $host_cpu in + powerpc) + # Since July 2007 AmigaOS4 officially supports .so libraries. + # When compiling the executable, add -use-dynld -Lsobjs: to the compileline. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + ;; + m68k) + library_names_spec='$libname.ixlibrary $libname.a' + # Create ${libname}_ixlibrary.a entries in /sys/libs. 
+ finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; test $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' + ;; + esac + ;; + +beos*) + library_names_spec='${libname}${shared_ext}' + dynamic_linker="$host_os ld.so" + shlibpath_var=LIBRARY_PATH + ;; + +bsdi[45]*) + version_type=linux # correct to gnu/linux during the next big refactor + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" + sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" + # the default ld.so.conf also contains /usr/contrib/lib and + # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow + # libtool to hard-code these into programs + ;; + +cygwin* | mingw* | pw32* | cegcc*) + version_type=windows + shrext_cmds=".dll" + need_version=no + need_lib_prefix=no + + case $GCC,$cc_basename in + yes,*) + # gcc + library_names_spec='$libname.dll.a' + # DLL is installed to $(libdir)/../bin by postinstall_cmds + postinstall_cmds='base_file=`basename \${file}`~ + dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname~ + chmod a+x \$dldir/$dlname~ + if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then + eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; + fi' + postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. 
$file; echo \$dlname'\''`~ + dlpath=$dir/\$dldll~ + $RM \$dlpath' + shlibpath_overrides_runpath=yes + + case $host_os in + cygwin*) + # Cygwin DLLs use 'cyg' prefix rather than 'lib' + soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + + ;; + mingw* | cegcc*) + # MinGW DLLs use traditional 'lib' prefix + soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + ;; + pw32*) + # pw32 DLLs use 'pw' prefix rather than 'lib' + library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + ;; + esac + dynamic_linker='Win32 ld.exe' + ;; + + *,cl*) + # Native MSVC + libname_spec='$name' + soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + library_names_spec='${libname}.dll.lib' + + case $build_os in + mingw*) + sys_lib_search_path_spec= + lt_save_ifs=$IFS + IFS=';' + for lt_path in $LIB + do + IFS=$lt_save_ifs + # Let DOS variable expansion print the short 8.3 style file name. + lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"` + sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path" + done + IFS=$lt_save_ifs + # Convert to MSYS style. + sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([a-zA-Z]\\):| /\\1|g' -e 's|^ ||'` + ;; + cygwin*) + # Convert to unix form, then to dos form, then back to unix form + # but this time dos style (no spaces!) so that the unix form looks + # like /cygdrive/c/PROGRA~1:/cygdr... 
+ sys_lib_search_path_spec=`cygpath --path --unix "$LIB"` + sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null` + sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + ;; + *) + sys_lib_search_path_spec="$LIB" + if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then + # It is most probably a Windows format PATH. + sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` + else + sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + fi + # FIXME: find the short name or the path components, as spaces are + # common. (e.g. "Program Files" -> "PROGRA~1") + ;; + esac + + # DLL is installed to $(libdir)/../bin by postinstall_cmds + postinstall_cmds='base_file=`basename \${file}`~ + dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname' + postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ + dlpath=$dir/\$dldll~ + $RM \$dlpath' + shlibpath_overrides_runpath=yes + dynamic_linker='Win32 link.exe' + ;; + + *) + # Assume MSVC wrapper + library_names_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext} $libname.lib' + dynamic_linker='Win32 ld.exe' + ;; + esac + # FIXME: first we should search . 
and the directory the executable is in + shlibpath_var=PATH + ;; + +darwin* | rhapsody*) + dynamic_linker="$host_os dyld" + version_type=darwin + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${major}$shared_ext ${libname}$shared_ext' + soname_spec='${libname}${release}${major}$shared_ext' + shlibpath_overrides_runpath=yes + shlibpath_var=DYLD_LIBRARY_PATH + shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' + + sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' + ;; + +dgux*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname$shared_ext' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +freebsd* | dragonfly*) + # DragonFly does not have aout. When/if they implement a new + # versioning mechanism, adjust this. + if test -x /usr/bin/objformat; then + objformat=`/usr/bin/objformat` + else + case $host_os in + freebsd[23].*) objformat=aout ;; + *) objformat=elf ;; + esac + fi + version_type=freebsd-$objformat + case $version_type in + freebsd-elf*) + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' + need_version=no + need_lib_prefix=no + ;; + freebsd-*) + library_names_spec='${libname}${release}${shared_ext}$versuffix $libname${shared_ext}$versuffix' + need_version=yes + ;; + esac + shlibpath_var=LD_LIBRARY_PATH + case $host_os in + freebsd2.*) + shlibpath_overrides_runpath=yes + ;; + freebsd3.[01]* | freebsdelf3.[01]*) + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + freebsd3.[2-9]* | freebsdelf3.[2-9]* | \ + freebsd4.[0-5] | freebsdelf4.[0-5] | freebsd4.1.1 | freebsdelf4.1.1) + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + *) # from 4.6 on, and DragonFly + shlibpath_overrides_runpath=yes + 
hardcode_into_libs=yes + ;; + esac + ;; + +gnu*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + +haiku*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + dynamic_linker="$host_os runtime_loader" + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LIBRARY_PATH + shlibpath_overrides_runpath=yes + sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib' + hardcode_into_libs=yes + ;; + +hpux9* | hpux10* | hpux11*) + # Give a soname corresponding to the major version so that dld.sl refuses to + # link against other versions. + version_type=sunos + need_lib_prefix=no + need_version=no + case $host_cpu in + ia64*) + shrext_cmds='.so' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.so" + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. 
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + if test "X$HPUX_IA64_MODE" = X32; then + sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" + else + sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" + fi + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + hppa*64*) + shrext_cmds='.sl' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.sl" + shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + *) + shrext_cmds='.sl' + dynamic_linker="$host_os dld.sl" + shlibpath_var=SHLIB_PATH + shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + ;; + esac + # HP-UX runs *really* slowly unless shared libraries are mode 555, ... 
+ postinstall_cmds='chmod 555 $lib' + # or fails outright, so override atomically: + install_override_mode=555 + ;; + +interix[3-9]*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + +irix5* | irix6* | nonstopux*) + case $host_os in + nonstopux*) version_type=nonstopux ;; + *) + if test "$lt_cv_prog_gnu_ld" = yes; then + version_type=linux # correct to gnu/linux during the next big refactor + else + version_type=irix + fi ;; + esac + need_lib_prefix=no + need_version=no + soname_spec='${libname}${release}${shared_ext}$major' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} $libname${shared_ext}' + case $host_os in + irix5* | nonstopux*) + libsuff= shlibsuff= + ;; + *) + case $LD in # libtool.m4 will add one of these switches to LD + *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") + libsuff= shlibsuff= libmagic=32-bit;; + *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") + libsuff=32 shlibsuff=N32 libmagic=N32;; + *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") + libsuff=64 shlibsuff=64 libmagic=64-bit;; + *) libsuff= shlibsuff= libmagic=never-match;; + esac + ;; + esac + shlibpath_var=LD_LIBRARY${shlibsuff}_PATH + shlibpath_overrides_runpath=no + sys_lib_search_path_spec="/usr/lib${libsuff} /lib${libsuff} /usr/local/lib${libsuff}" + sys_lib_dlsearch_path_spec="/usr/lib${libsuff} /lib${libsuff}" + hardcode_into_libs=yes + ;; + +# No shared lib support for Linux oldld, aout, or coff. +linux*oldld* | linux*aout* | linux*coff*) + dynamic_linker=no + ;; + +# This must be glibc/ELF. 
+linux* | k*bsd*-gnu | kopensolaris*-gnu) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + + # Some binutils ld are patched to set DT_RUNPATH + if ${lt_cv_shlibpath_overrides_runpath+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_shlibpath_overrides_runpath=no + save_LDFLAGS=$LDFLAGS + save_libdir=$libdir + eval "libdir=/foo; wl=\"$lt_prog_compiler_wl_CXX\"; \ + LDFLAGS=\"\$LDFLAGS $hardcode_libdir_flag_spec_CXX\"" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_link "$LINENO"; then : + if ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null; then : + lt_cv_shlibpath_overrides_runpath=yes +fi +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + LDFLAGS=$save_LDFLAGS + libdir=$save_libdir + +fi + + shlibpath_overrides_runpath=$lt_cv_shlibpath_overrides_runpath + + # This implies no fast_install, which is unacceptable. + # Some rework will be needed to allow for fast_install + # before this can be enabled. + hardcode_into_libs=yes + + # Add ABI-specific directories to the system library path. 
+ sys_lib_dlsearch_path_spec="/lib64 /usr/lib64 /lib /usr/lib" + + # Append ld.so.conf contents to the search path + if test -f /etc/ld.so.conf; then + lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;s/"//g;/^$/d' | tr '\n' ' '` + sys_lib_dlsearch_path_spec="$sys_lib_dlsearch_path_spec $lt_ld_extra" + + fi + + # We used to test for /lib/ld.so.1 and disable shared libraries on + # powerpc, because MkLinux only supported shared libraries with the + # GNU dynamic linker. Since this was broken with cross compilers, + # most powerpc-linux boxes support dynamic linking these days and + # people can always --disable-shared, the test was removed, and we + # assume the GNU/Linux dynamic linker is in use. + dynamic_linker='GNU/Linux ld.so' + ;; + +netbsd*) + version_type=sunos + need_lib_prefix=no + need_version=no + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + dynamic_linker='NetBSD (a.out) ld.so' + else + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + dynamic_linker='NetBSD ld.elf_so' + fi + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + +newsos6) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + ;; + +*nto* | *qnx*) + version_type=qnx + need_lib_prefix=no + need_version=no + 
library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='ldqnx.so' + ;; + +openbsd*) + version_type=sunos + sys_lib_dlsearch_path_spec="/usr/lib" + need_lib_prefix=no + # Some older versions of OpenBSD (3.3 at least) *do* need versioned libs. + case $host_os in + openbsd3.3 | openbsd3.3.*) need_version=yes ;; + *) need_version=no ;; + esac + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + shlibpath_var=LD_LIBRARY_PATH + if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + case $host_os in + openbsd2.[89] | openbsd2.[89].*) + shlibpath_overrides_runpath=no + ;; + *) + shlibpath_overrides_runpath=yes + ;; + esac + else + shlibpath_overrides_runpath=yes + fi + ;; + +os2*) + libname_spec='$name' + shrext_cmds=".dll" + need_lib_prefix=no + library_names_spec='$libname${shared_ext} $libname.a' + dynamic_linker='OS/2 ld.exe' + shlibpath_var=LIBPATH + ;; + +osf3* | osf4* | osf5*) + version_type=osf + need_lib_prefix=no + need_version=no + soname_spec='${libname}${release}${shared_ext}$major' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" + sys_lib_dlsearch_path_spec="$sys_lib_search_path_spec" + ;; + +rdos*) + dynamic_linker=no + ;; + +solaris*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major 
$libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + # ldd complains unless libraries are executable + postinstall_cmds='chmod +x $lib' + ;; + +sunos4*) + version_type=sunos + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + if test "$with_gnu_ld" = yes; then + need_lib_prefix=no + fi + need_version=yes + ;; + +sysv4 | sysv4.3*) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + case $host_vendor in + sni) + shlibpath_overrides_runpath=no + need_lib_prefix=no + runpath_var=LD_RUN_PATH + ;; + siemens) + need_lib_prefix=no + ;; + motorola) + need_lib_prefix=no + need_version=no + shlibpath_overrides_runpath=no + sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' + ;; + esac + ;; + +sysv4*MP*) + if test -d /usr/nec ;then + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='$libname${shared_ext}.$versuffix $libname${shared_ext}.$major $libname${shared_ext}' + soname_spec='$libname${shared_ext}.$major' + shlibpath_var=LD_LIBRARY_PATH + fi + ;; + +sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) + version_type=freebsd-elf + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + if test "$with_gnu_ld" = yes; then + 
sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib' + else + sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' + case $host_os in + sco3.2v5*) + sys_lib_search_path_spec="$sys_lib_search_path_spec /lib" + ;; + esac + fi + sys_lib_dlsearch_path_spec='/usr/lib' + ;; + +tpf*) + # TPF is a cross-target only. Preferred cross-host = GNU/Linux. + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + +uts4*) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +*) + dynamic_linker=no + ;; +esac +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $dynamic_linker" >&5 +$as_echo "$dynamic_linker" >&6; } +test "$dynamic_linker" = no && can_build_shared=no + +variables_saved_for_relink="PATH $shlibpath_var $runpath_var" +if test "$GCC" = yes; then + variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" +fi + +if test "${lt_cv_sys_lib_search_path_spec+set}" = set; then + sys_lib_search_path_spec="$lt_cv_sys_lib_search_path_spec" +fi +if test "${lt_cv_sys_lib_dlsearch_path_spec+set}" = set; then + sys_lib_dlsearch_path_spec="$lt_cv_sys_lib_dlsearch_path_spec" +fi + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to hardcode library paths into programs" >&5 +$as_echo_n "checking how to hardcode library paths into programs... 
" >&6; } +hardcode_action_CXX= +if test -n "$hardcode_libdir_flag_spec_CXX" || + test -n "$runpath_var_CXX" || + test "X$hardcode_automatic_CXX" = "Xyes" ; then + + # We can hardcode non-existent directories. + if test "$hardcode_direct_CXX" != no && + # If the only mechanism to avoid hardcoding is shlibpath_var, we + # have to relink, otherwise we might link with an installed library + # when we should be linking with a yet-to-be-installed one + ## test "$_LT_TAGVAR(hardcode_shlibpath_var, CXX)" != no && + test "$hardcode_minus_L_CXX" != no; then + # Linking always hardcodes the temporary library directory. + hardcode_action_CXX=relink + else + # We can link without hardcoding, and we can hardcode nonexisting dirs. + hardcode_action_CXX=immediate + fi +else + # We cannot hardcode anything, or else we can only hardcode existing + # directories. + hardcode_action_CXX=unsupported +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $hardcode_action_CXX" >&5 +$as_echo "$hardcode_action_CXX" >&6; } + +if test "$hardcode_action_CXX" = relink || + test "$inherit_rpath_CXX" = yes; then + # Fast installation is not supported + enable_fast_install=no +elif test "$shlibpath_overrides_runpath" = yes || + test "$enable_shared" = no; then + # Fast installation is not necessary + enable_fast_install=needless +fi + + + + + + + + fi # test -n "$compiler" + + CC=$lt_save_CC + CFLAGS=$lt_save_CFLAGS + LDCXX=$LD + LD=$lt_save_LD + GCC=$lt_save_GCC + with_gnu_ld=$lt_save_with_gnu_ld + lt_cv_path_LDCXX=$lt_cv_path_LD + lt_cv_path_LD=$lt_save_path_LD + lt_cv_prog_gnu_ldcxx=$lt_cv_prog_gnu_ld + lt_cv_prog_gnu_ld=$lt_save_with_gnu_ld +fi # test "$_lt_caught_CXX_error" != yes + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + + + + + + + + + + + + + + + ac_config_commands="$ac_config_commands libtool" + + + + +# 
Only expand once: + + +# Extract the first word of "rm", so it can be a program name with args. +set dummy rm; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_RM+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$RM"; then + ac_cv_prog_RM="$RM" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_RM="rm" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +RM=$ac_cv_prog_RM +if test -n "$RM"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $RM" >&5 +$as_echo "$RM" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +# Extract the first word of "cpp", so it can be a program name with args. +set dummy cpp; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ASCPP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ASCPP"; then + ac_cv_prog_ASCPP="$ASCPP" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ASCPP="cpp" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ASCPP=$ac_cv_prog_ASCPP +if test -n "$ASCPP"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ASCPP" >&5 +$as_echo "$ASCPP" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}ar", so it can be a program name with args. +set dummy ${ac_tool_prefix}ar; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_AR+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$AR"; then + ac_cv_prog_AR="$AR" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_AR="${ac_tool_prefix}ar" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +AR=$ac_cv_prog_AR +if test -n "$AR"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AR" >&5 +$as_echo "$AR" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_AR"; then + ac_ct_AR=$AR + # Extract the first word of "ar", so it can be a program name with args. +set dummy ar; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... 
" >&6; } +if ${ac_cv_prog_ac_ct_AR+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_AR"; then + ac_cv_prog_ac_ct_AR="$ac_ct_AR" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_AR="ar" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_AR=$ac_cv_prog_ac_ct_AR +if test -n "$ac_ct_AR"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_AR" >&5 +$as_echo "$ac_ct_AR" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_AR" = x; then + AR="ar" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + AR=$ac_ct_AR + fi +else + AR="$ac_cv_prog_AR" +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing strerror" >&5 +$as_echo_n "checking for library containing strerror... " >&6; } +if ${ac_cv_search_strerror+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_func_search_save_LIBS=$LIBS +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char strerror (); +int +main () +{ +return strerror (); + ; + return 0; +} +_ACEOF +for ac_lib in '' cposix; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + if ac_fn_c_try_link "$LINENO"; then : + ac_cv_search_strerror=$ac_res +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext + if ${ac_cv_search_strerror+:} false; then : + break +fi +done +if ${ac_cv_search_strerror+:} false; then : + +else + ac_cv_search_strerror=no +fi +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_strerror" >&5 +$as_echo "$ac_cv_search_strerror" >&6; } +ac_res=$ac_cv_search_strerror +if test "$ac_res" != no; then : + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" + +fi + + + + + + + + + if test -n "$PYTHON"; then + # If the user set $PYTHON, use it and don't search something else. + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $PYTHON version is >= 2.4" >&5 +$as_echo_n "checking whether $PYTHON version is >= 2.4... " >&6; } + prog="import sys +# split strings by '.' and convert to numeric. Append some zeros +# because we need at least 4 digits for the hex conversion. +# map returns an iterator in Python 3.0 and a list in 2.x +minver = list(map(int, '2.4'.split('.'))) + [0, 0, 0] +minverhex = 0 +# xrange is not present in Python 3.0 and range returns an iterator +for i in list(range(0, 4)): minverhex = (minverhex << 8) + minver[i] +sys.exit(sys.hexversion < minverhex)" + if { echo "$as_me:$LINENO: $PYTHON -c "$prog"" >&5 + ($PYTHON -c "$prog") >&5 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; then : + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + as_fn_error $? 
"Python interpreter is too old" "$LINENO" 5 +fi + am_display_PYTHON=$PYTHON + else + # Otherwise, try each interpreter until we find one that satisfies + # VERSION. + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for a Python interpreter with version >= 2.4" >&5 +$as_echo_n "checking for a Python interpreter with version >= 2.4... " >&6; } +if ${am_cv_pathless_PYTHON+:} false; then : + $as_echo_n "(cached) " >&6 +else + + for am_cv_pathless_PYTHON in python python2 python3 python3.3 python3.2 python3.1 python3.0 python2.7 python2.6 python2.5 python2.4 python2.3 python2.2 python2.1 python2.0 none; do + test "$am_cv_pathless_PYTHON" = none && break + prog="import sys +# split strings by '.' and convert to numeric. Append some zeros +# because we need at least 4 digits for the hex conversion. +# map returns an iterator in Python 3.0 and a list in 2.x +minver = list(map(int, '2.4'.split('.'))) + [0, 0, 0] +minverhex = 0 +# xrange is not present in Python 3.0 and range returns an iterator +for i in list(range(0, 4)): minverhex = (minverhex << 8) + minver[i] +sys.exit(sys.hexversion < minverhex)" + if { echo "$as_me:$LINENO: $am_cv_pathless_PYTHON -c "$prog"" >&5 + ($am_cv_pathless_PYTHON -c "$prog") >&5 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; then : + break +fi + done +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_pathless_PYTHON" >&5 +$as_echo "$am_cv_pathless_PYTHON" >&6; } + # Set $PYTHON to the absolute path of $am_cv_pathless_PYTHON. + if test "$am_cv_pathless_PYTHON" = none; then + PYTHON=: + else + # Extract the first word of "$am_cv_pathless_PYTHON", so it can be a program name with args. +set dummy $am_cv_pathless_PYTHON; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... 
" >&6; } +if ${ac_cv_path_PYTHON+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $PYTHON in + [\\/]* | ?:[\\/]*) + ac_cv_path_PYTHON="$PYTHON" # Let the user override the test with a path. + ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_path_PYTHON="$as_dir/$ac_word$ac_exec_ext" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + ;; +esac +fi +PYTHON=$ac_cv_path_PYTHON +if test -n "$PYTHON"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $PYTHON" >&5 +$as_echo "$PYTHON" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + fi + am_display_PYTHON=$am_cv_pathless_PYTHON + fi + + + if test "$PYTHON" = :; then + + : + + else + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $am_display_PYTHON version" >&5 +$as_echo_n "checking for $am_display_PYTHON version... " >&6; } +if ${am_cv_python_version+:} false; then : + $as_echo_n "(cached) " >&6 +else + am_cv_python_version=`$PYTHON -c "import sys; sys.stdout.write(sys.version[:3])"` +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_python_version" >&5 +$as_echo "$am_cv_python_version" >&6; } + PYTHON_VERSION=$am_cv_python_version + + + + PYTHON_PREFIX='${prefix}' + + PYTHON_EXEC_PREFIX='${exec_prefix}' + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $am_display_PYTHON platform" >&5 +$as_echo_n "checking for $am_display_PYTHON platform... 
" >&6; } +if ${am_cv_python_platform+:} false; then : + $as_echo_n "(cached) " >&6 +else + am_cv_python_platform=`$PYTHON -c "import sys; sys.stdout.write(sys.platform)"` +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_python_platform" >&5 +$as_echo "$am_cv_python_platform" >&6; } + PYTHON_PLATFORM=$am_cv_python_platform + + + # Just factor out some code duplication. + am_python_setup_sysconfig="\ +import sys +# Prefer sysconfig over distutils.sysconfig, for better compatibility +# with python 3.x. See automake bug#10227. +try: + import sysconfig +except ImportError: + can_use_sysconfig = 0 +else: + can_use_sysconfig = 1 +# Can't use sysconfig in CPython 2.7, since it's broken in virtualenvs: +# +try: + from platform import python_implementation + if python_implementation() == 'CPython' and sys.version[:3] == '2.7': + can_use_sysconfig = 0 +except ImportError: + pass" + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $am_display_PYTHON script directory" >&5 +$as_echo_n "checking for $am_display_PYTHON script directory... 
" >&6; } +if ${am_cv_python_pythondir+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test "x$prefix" = xNONE + then + am_py_prefix=$ac_default_prefix + else + am_py_prefix=$prefix + fi + am_cv_python_pythondir=`$PYTHON -c " +$am_python_setup_sysconfig +if can_use_sysconfig: + sitedir = sysconfig.get_path('purelib', vars={'base':'$am_py_prefix'}) +else: + from distutils import sysconfig + sitedir = sysconfig.get_python_lib(0, 0, prefix='$am_py_prefix') +sys.stdout.write(sitedir)"` + case $am_cv_python_pythondir in + $am_py_prefix*) + am__strip_prefix=`echo "$am_py_prefix" | sed 's|.|.|g'` + am_cv_python_pythondir=`echo "$am_cv_python_pythondir" | sed "s,^$am__strip_prefix,$PYTHON_PREFIX,"` + ;; + *) + case $am_py_prefix in + /usr|/System*) ;; + *) + am_cv_python_pythondir=$PYTHON_PREFIX/lib/python$PYTHON_VERSION/site-packages + ;; + esac + ;; + esac + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_python_pythondir" >&5 +$as_echo "$am_cv_python_pythondir" >&6; } + pythondir=$am_cv_python_pythondir + + + + pkgpythondir=\${pythondir}/$PACKAGE + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $am_display_PYTHON extension module directory" >&5 +$as_echo_n "checking for $am_display_PYTHON extension module directory... 
" >&6; } +if ${am_cv_python_pyexecdir+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test "x$exec_prefix" = xNONE + then + am_py_exec_prefix=$am_py_prefix + else + am_py_exec_prefix=$exec_prefix + fi + am_cv_python_pyexecdir=`$PYTHON -c " +$am_python_setup_sysconfig +if can_use_sysconfig: + sitedir = sysconfig.get_path('platlib', vars={'platbase':'$am_py_prefix'}) +else: + from distutils import sysconfig + sitedir = sysconfig.get_python_lib(1, 0, prefix='$am_py_prefix') +sys.stdout.write(sitedir)"` + case $am_cv_python_pyexecdir in + $am_py_exec_prefix*) + am__strip_prefix=`echo "$am_py_exec_prefix" | sed 's|.|.|g'` + am_cv_python_pyexecdir=`echo "$am_cv_python_pyexecdir" | sed "s,^$am__strip_prefix,$PYTHON_EXEC_PREFIX,"` + ;; + *) + case $am_py_exec_prefix in + /usr|/System*) ;; + *) + am_cv_python_pyexecdir=$PYTHON_EXEC_PREFIX/lib/python$PYTHON_VERSION/site-packages + ;; + esac + ;; + esac + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_python_pyexecdir" >&5 +$as_echo "$am_cv_python_pyexecdir" >&6; } + pyexecdir=$am_cv_python_pyexecdir + + + + pkgpyexecdir=\${pyexecdir}/$PACKAGE + + + + TS_MAN1_MANPAGES=`cd $srcdir/doc && $PYTHON manpages.py --section=1 | $AWK '{print "$(BUILDDIR)/man/" $0 }' | tr '\n' ' '` + TS_MAN3_MANPAGES=`cd $srcdir/doc && $PYTHON manpages.py --section=3 | $AWK '{print "$(BUILDDIR)/man/" $0 }' | tr '\n' ' '` + TS_MAN5_MANPAGES=`cd $srcdir/doc && $PYTHON manpages.py --section=5 | $AWK '{print "$(BUILDDIR)/man/" $0 }' | tr '\n' ' '` + TS_MAN8_MANPAGES=`cd $srcdir/doc && $PYTHON manpages.py --section=8 | $AWK '{print "$(BUILDDIR)/man/" $0 }' | tr '\n' ' '` + + # If we have python, check if the Sphinx version looks OK. + if "$PYTHON" "$srcdir/doc/checkvers.py" --check-version; then : + + sphinx_version_check=yes + +fi + + + fi + + + + + +# Extract the first word of "sphinx-build", so it can be a program name with args. 
+set dummy sphinx-build; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_SPHINXBUILD+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$SPHINXBUILD"; then + ac_cv_prog_SPHINXBUILD="$SPHINXBUILD" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_SPHINXBUILD="sphinx-build" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + test -z "$ac_cv_prog_SPHINXBUILD" && ac_cv_prog_SPHINXBUILD="false" +fi +fi +SPHINXBUILD=$ac_cv_prog_SPHINXBUILD +if test -n "$SPHINXBUILD"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $SPHINXBUILD" >&5 +$as_echo "$SPHINXBUILD" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to build man pages" >&5 +$as_echo_n "checking whether to build man pages... " >&6; } +if test "x$sphinx_version_check" = "xyes" -a "x$SPHINXBUILD" != "xfalse"; then : + + if true; then + BUILD_MANPAGES_TRUE= + BUILD_MANPAGES_FALSE='#' +else + BUILD_MANPAGES_TRUE='#' + BUILD_MANPAGES_FALSE= +fi + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + +else + + if false; then + BUILD_MANPAGES_TRUE= + BUILD_MANPAGES_FALSE='#' +else + BUILD_MANPAGES_TRUE='#' + BUILD_MANPAGES_FALSE= +fi + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + +fi + +# Do bison check by hand because we must do a version check. +# Use YACC because it makes autotools shut up. 
+BISON_MAJOR=2 +BISON_MINOR=4 +BISON_POINT=1 +# Extract the first word of "bison", so it can be a program name with args. +set dummy bison; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_YACC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$YACC"; then + ac_cv_prog_YACC="$YACC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_YACC="bison" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +YACC=$ac_cv_prog_YACC +if test -n "$YACC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $YACC" >&5 +$as_echo "$YACC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +if test -n "$YACC"; then : + bison_version_check=`$YACC --version 2>&1 | \ + $SED -n '/bison/s/^[^0-9]*\([0-9][0-9.]*\).*$/\1/p' | \ + $AWK "{ if (\\$1 > $BISON_MAJOR || (\\$1 == $BISON_MAJOR && (\\$2 > $BISON_MINOR || (\\$2 == $BISON_MINOR && (NR == 2 || \\$3 >= $BISON_POINT))))) print \"yes\"; else printf(\"version %d.%d.%d\",\\$1,\\$2,\\$3); }" FS=. \ + ` + if test "x$bison_version_check" != "xyes"; then : + YACC='' + if test -z "$bison_version_check"; then : + bison_version_check='no version data' + +fi + + +fi + +else + YACC='' + bison_version_check="nothing" + + +fi +# Check lex/flex by hand because we need flex of a sufficient version. +FLEX_MAJOR=2 +FLEX_MINOR=5 +FLEX_POINT=33 +for ac_prog in flex lex +do + # Extract the first word of "$ac_prog", so it can be a program name with args. 
+set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_LEX+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$LEX"; then + ac_cv_prog_LEX="$LEX" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_LEX="$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +LEX=$ac_cv_prog_LEX +if test -n "$LEX"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LEX" >&5 +$as_echo "$LEX" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$LEX" && break +done +test -n "$LEX" || LEX=":" + +if test "x$LEX" != "x:"; then + cat >conftest.l <<_ACEOF +%% +a { ECHO; } +b { REJECT; } +c { yymore (); } +d { yyless (1); } +e { /* IRIX 6.5 flex 2.5.4 underquotes its yyless argument. */ + yyless ((input () != 0)); } +f { unput (yytext[0]); } +. { BEGIN INITIAL; } +%% +#ifdef YYTEXT_POINTER +extern char *yytext; +#endif +int +main (void) +{ + return ! yylex () + ! yywrap (); +} +_ACEOF +{ { ac_try="$LEX conftest.l" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$LEX conftest.l") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking lex output file root" >&5 +$as_echo_n "checking lex output file root... 
" >&6; } +if ${ac_cv_prog_lex_root+:} false; then : + $as_echo_n "(cached) " >&6 +else + +if test -f lex.yy.c; then + ac_cv_prog_lex_root=lex.yy +elif test -f lexyy.c; then + ac_cv_prog_lex_root=lexyy +else + as_fn_error $? "cannot find output from $LEX; giving up" "$LINENO" 5 +fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_lex_root" >&5 +$as_echo "$ac_cv_prog_lex_root" >&6; } +LEX_OUTPUT_ROOT=$ac_cv_prog_lex_root + +if test -z "${LEXLIB+set}"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking lex library" >&5 +$as_echo_n "checking lex library... " >&6; } +if ${ac_cv_lib_lex+:} false; then : + $as_echo_n "(cached) " >&6 +else + + ac_save_LIBS=$LIBS + ac_cv_lib_lex='none needed' + for ac_lib in '' -lfl -ll; do + LIBS="$ac_lib $ac_save_LIBS" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +`cat $LEX_OUTPUT_ROOT.c` +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_lex=$ac_lib +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + test "$ac_cv_lib_lex" != 'none needed' && break + done + LIBS=$ac_save_LIBS + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_lex" >&5 +$as_echo "$ac_cv_lib_lex" >&6; } + test "$ac_cv_lib_lex" != 'none needed' && LEXLIB=$ac_cv_lib_lex +fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether yytext is a pointer" >&5 +$as_echo_n "checking whether yytext is a pointer... " >&6; } +if ${ac_cv_prog_lex_yytext_pointer+:} false; then : + $as_echo_n "(cached) " >&6 +else + # POSIX says lex can declare yytext either as a pointer or an array; the +# default is implementation-dependent. Figure out which it is, since +# not all implementations provide the %pointer and %array declarations. +ac_cv_prog_lex_yytext_pointer=no +ac_save_LIBS=$LIBS +LIBS="$LEXLIB $ac_save_LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + + #define YYTEXT_POINTER 1 +`cat $LEX_OUTPUT_ROOT.c` +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_prog_lex_yytext_pointer=yes +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_save_LIBS + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_lex_yytext_pointer" >&5 +$as_echo "$ac_cv_prog_lex_yytext_pointer" >&6; } +if test $ac_cv_prog_lex_yytext_pointer = yes; then + +$as_echo "#define YYTEXT_POINTER 1" >>confdefs.h + +fi +rm -f conftest.l $LEX_OUTPUT_ROOT.c + +fi +if test -n "$LEX"; then : + flex_version_check=`$LEX --version 2>&1 | \ + $SED -n '/flex/s/^[^0-9]*\([0-9][0-9.]*\)[^0-9]*.*$/\1/p' | \ + $AWK "{ if (\\$1 > $FLEX_MAJOR || (\\$1 == $FLEX_MAJOR && (\\$2 > $FLEX_MINOR || (\\$2 == $FLEX_MINOR && (NR == 2 || \\$3 >= $FLEX_POINT))))) print \"yes\"; else printf(\"version %d.%d.%d\",\\$1,\\$2,\\$3); }" FS=. \ + ` + if test "x$flex_version_check" != "xyes"; then : + LEX='' + if test -z "$flex_version_check"; then : + flex_version_check='no version data' + +fi + + +fi + +else + LEX='' + flex_version_check="nothing" + + +fi + +if test "x$enable_wccp" = "xyes"; then : + + if test -z "$LEX" ; then : + as_fn_error $? "Need flex version $FLEX_MAJOR.$FLEX_MINOR.$FLEX_POINT or better to enable WCCP (found $flex_version_check)" "$LINENO" 5 + +fi + + if test -z "$YACC" ; then : + as_fn_error $? "Need bison version $BISON_MAJOR.$BISON_MINOR.$BISON_POINT or better to enable WCCP (found $bison_version_check)" "$LINENO" 5 + +fi + + + +fi + +# Check for Perl and Doxygen +# Extract the first word of "doxygen", so it can be a program name with args. +set dummy doxygen; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_path_DOXYGEN+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $DOXYGEN in + [\\/]* | ?:[\\/]*) + ac_cv_path_DOXYGEN="$DOXYGEN" # Let the user override the test with a path. 
+ ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_path_DOXYGEN="$as_dir/$ac_word$ac_exec_ext" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + ;; +esac +fi +DOXYGEN=$ac_cv_path_DOXYGEN +if test -n "$DOXYGEN"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DOXYGEN" >&5 +$as_echo "$DOXYGEN" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + # needed for Doxygen +# Extract the first word of "perl", so it can be a program name with args. +set dummy perl; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_path_PERL+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $PERL in + [\\/]* | ?:[\\/]*) + ac_cv_path_PERL="$PERL" # Let the user override the test with a path. + ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_path_PERL="$as_dir/$ac_word$ac_exec_ext" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + test -z "$ac_cv_path_PERL" && ac_cv_path_PERL="not found" + ;; +esac +fi +PERL=$ac_cv_path_PERL +if test -n "$PERL"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $PERL" >&5 +$as_echo "$PERL" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +if test "x$PERL" = "xnot found"; then : + as_fn_error $? "check for perl failed. Have you installed perl?" 
"$LINENO" 5 + +fi + + + +# Check if MakeMaker is available + + + + + + +if test "x$PERL" != x; then + ax_perl_modules_failed=0 + for ax_perl_module in 'ExtUtils::MakeMaker' ; do + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for perl module $ax_perl_module" >&5 +$as_echo_n "checking for perl module $ax_perl_module... " >&6; } + + # Would be nice to log result here, but can't rely on autoconf internals + $PERL -e "use $ax_perl_module; exit" > /dev/null 2>&1 + if test $? -ne 0; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; }; + ax_perl_modules_failed=1 + else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: ok" >&5 +$as_echo "ok" >&6; }; + fi + done + + # Run optional shell commands + if test "$ax_perl_modules_failed" = 0; then + : + if true; then + BUILD_PERL_LIB_TRUE= + BUILD_PERL_LIB_FALSE='#' +else + BUILD_PERL_LIB_TRUE='#' + BUILD_PERL_LIB_FALSE= +fi + + else + : + if false; then + BUILD_PERL_LIB_TRUE= + BUILD_PERL_LIB_FALSE='#' +else + BUILD_PERL_LIB_TRUE='#' + BUILD_PERL_LIB_FALSE= +fi + + + fi +else + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: could not find perl" >&5 +$as_echo "$as_me: WARNING: could not find perl" >&2;} +fi + +# Check for GNU-style -On optimization flags +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking checking whether to auto-set compiler optimization flags" >&5 +$as_echo_n "checking checking whether to auto-set compiler optimization flags... 
" >&6; } +has_optimizer_flags=`echo "$CFLAGS $CXXFLAGS" | ${AWK} '$0 !~ /-O.?/{print "no"}'` +if test "x${has_optimizer_flags}" = "xno"; then : + + optimizing_flags='-O3' + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes ${optimizing_flags}" >&5 +$as_echo "yes ${optimizing_flags}" >&6; } + +else + + has_optimizer_flags='yes' + optimizing_flags='' + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + + +fi + + if false ; then + BUILD_HAVE_LIBCXX_TRUE= + BUILD_HAVE_LIBCXX_FALSE='#' +else + BUILD_HAVE_LIBCXX_TRUE='#' + BUILD_HAVE_LIBCXX_FALSE= +fi + + +case $host_os_def in + linux) + if test "x$ax_cv_c_compiler_vendor" = "xintel"; then : + + # -Wall goes crazy, so turned these specific checks off for now: + # + # 111 is "statement is unreachable" + # 279 is "controlling expression is constant", triggered by our asserts + # 383 is "value copied to temporary, reference to temporary used" + # 444 is "destructor for base class is not virtual" + # 522 is "function "xyz" redeclared "inline" after being called + # 873 is "has no corresponding operator delete". ToDo: we should fix. + # 981 is "operands are evaluated in unspecified order" + # 1418 is "external function definition with no prior declaration" + # 1419 is "external declaration in primary source file" + # 1572 is "floating-point equality and inequality comparisons are unreliable" + # 1720 is "operator new" has no corresponding member operator delete" + # 2256 is "non-pointer conversion from "int" to "unsigned char" " + # 2259 is "non-pointer conversion from "int" to "unsigned char" " + # + # TODO: We should try to eliminate more of these -wd exclusions. 
+ common_opt="-pipe -Wall -wd111 -wd279 -wd383 -wd522 -wd444 -wd873 -wd981 -wd1418 -wd1419 -wd1572 -wd1720 -wd2256 -wd2259" + debug_opt="-ggdb3 $common_opt" + release_opt="-g $common_opt $optimization_flags -axsse4.2 -fno-strict-aliasing" + cxx_opt="-Wno-invalid-offsetof" + +fi + + if test "x$ax_cv_c_compiler_vendor" = "xclang"; then : + + debug_opt="-ggdb3 $common_opt -Qunused-arguments" + release_opt="-g $common_opt $optimizing_flags -fno-strict-aliasing -Qunused-arguments" + cxx_opt="-Wno-invalid-offsetof -Qunused-arguments" + +fi + + if test "x$ax_cv_c_compiler_vendor" = "xgnu"; then : + + # This is useful for finding odd conversions + # common_opt="-pipe -Wall -Wconversion -Wno-sign-conversion" + ts_am_common_flags="-Wunused-parameter" + common_opt="-pipe -Wall" + debug_opt="-ggdb3 $common_opt" + release_opt="-g $common_opt $optimizing_flags -feliminate-unused-debug-symbols -fno-strict-aliasing" + cxx_opt="-Wno-invalid-offsetof" + # Special options for flex generated .c files + flex_cflags="-Wno-unused-parameter" + +fi + + ;; # linux) + + darwin) + if test "x$ax_cv_c_compiler_vendor" = "xclang"; then : + + common_opt="-pipe -Wall -Wno-deprecated-declarations -Qunused-arguments" + debug_opt="-g $common_opt" + release_opt="-g $common_opt $optimizing_flags -fno-strict-aliasing" + cxx_opt="-Wno-invalid-offsetof" + +else + + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: clang is the only supported compiler on Darwin" >&5 +$as_echo "$as_me: WARNING: clang is the only supported compiler on Darwin" >&2;} + +fi + + # Darwin needs to use libc++ for any C++11 code. + if true ; then + BUILD_HAVE_LIBCXX_TRUE= + BUILD_HAVE_LIBCXX_FALSE='#' +else + BUILD_HAVE_LIBCXX_TRUE='#' + BUILD_HAVE_LIBCXX_FALSE= +fi + + + # NOTE: This seems semi-kludgy, but useful for MacPorts I think. 
+ if test -d /opt/local/include; then : + + + if test "x$CPPFLAGS" = "x"; then + test "x$verbose" = "xyes" && echo " setting CPPFLAGS to \"-I/opt/local/include\"" + CPPFLAGS="-I/opt/local/include" + else + ats_addto_bugger="-I/opt/local/include" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in $CPPFLAGS; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to CPPFLAGS" + CPPFLAGS="$CPPFLAGS $i" + fi + done + fi + + +fi + if test -d /opt/local/lib; then : + + + if test "x$LDFLAGS" = "x"; then + test "x$verbose" = "xyes" && echo " setting LDFLAGS to \"-L/opt/local/lib\"" + LDFLAGS="-L/opt/local/lib" + else + ats_addto_bugger="-L/opt/local/lib" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in $LDFLAGS; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to LDFLAGS" + LDFLAGS="$LDFLAGS $i" + fi + done + fi + + +fi + + ;; # darwin) + + freebsd|kfreebsd) + if test "x$ax_cv_c_compiler_vendor" = "xclang"; then : + + common_opt="-pipe -Wall -Qunused-arguments" + debug_opt="-ggdb3 $common_opt" + release_opt="-g $common_opt $optimizing_flags -feliminate-unused-debug-symbols -fno-strict-aliasing" + cxx_opt="-Wno-invalid-offsetof -Qunused-arguments" + +fi + + if test "x$ax_cv_c_compiler_vendor" = "xgnu"; then : + + common_opt="-pipe -Wall" + debug_opt="-ggdb3 $common_opt" + release_opt="-g $common_opt $optimizing_flags -feliminate-unused-debug-symbols -fno-strict-aliasing" + cxx_opt="-Wno-invalid-offsetof" + +fi + + if test -d /usr/local/lib; then : + + + if test "x$LDFLAGS" = "x"; then + test "x$verbose" = "xyes" && echo " setting LDFLAGS to \"-L/usr/local/lib\"" + LDFLAGS="-L/usr/local/lib" + else + ats_addto_bugger="-L/usr/local/lib" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + 
for j in $LDFLAGS; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to LDFLAGS" + LDFLAGS="$LDFLAGS $i" + fi + done + fi + + +fi + + ;; # freebsd|kfreebsd) + + solaris) + if test "x$ax_cv_c_compiler_vendor" = "xgnu"; then : + + common_opt="-pipe -Wall" + debug_opt="-ggdb3 $common_opt" + release_opt="-g $common_opt $optimizing_flags -feliminate-unused-debug-symbols -fno-strict-aliasing" + cxx_opt="-Wno-invalid-offsetof" + +fi + + ;; # solaris) + + *) + # Not sure what platform this is, but take a stab at some general GCC options ... + if test "x$ax_cv_c_compiler_vendor" = "xgnu"; then : + + common_opt="-pipe -Wall" + debug_opt="-ggdb3 $common_opt" + release_opt="-g $common_opt $optimizing_flags -feliminate-unused-debug-symbols -fno-strict-aliasing" + cxx_opt="-Wno-invalid-offsetof" + +fi +esac + +# Only add -Werror if the user has requested it. We enable this by default for +# development, disable it by default for release. 
+if test x"$enable_werror" = xyes; then : + + + if test "x$release_opt" = "x"; then + test "x$verbose" = "xyes" && echo " setting release_opt to \"-Werror\"" + release_opt="-Werror" + else + ats_addto_bugger="-Werror" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in $release_opt; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to release_opt" + release_opt="$release_opt $i" + fi + done + fi + + + if test "x$debug_opt" = "x"; then + test "x$verbose" = "xyes" && echo " setting debug_opt to \"-Werror\"" + debug_opt="-Werror" + else + ats_addto_bugger="-Werror" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in $debug_opt; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to debug_opt" + debug_opt="$debug_opt $i" + fi + done + fi + + +fi + +cc_oflag_opt=$release_opt +cc_oflag_dbg=$debug_opt +cxx_oflag_opt="$release_opt $cxx_opt $cxx_rel" +cxx_oflag_dbg="$debug_opt $cxx_opt $cxx_dbg" + +AM_CFLAGS="$ts_am_common_flags" +AM_CXXFLAGS="$ts_am_common_flags" + + + +FLEX_CFLAGS=$flex_cflags + + +SHARED_CFLAGS=-fPIC +SHARED_LDFLAGS=-shared +SHARED_CXXFLAGS=-fPIC +SHARED_CXXLINKFLAGS=-shared + +# +# _Here_ is where we go ahead and add the _optimizations_ to already +# existing CFLAGS/CXXFLAGS if some special values had been set. 
+# +if test "x${enable_debug}" = "xyes"; then + + if test "x$CFLAGS" = "x"; then + test "x$verbose" = "xyes" && echo " setting CFLAGS to \"${cc_oflag_dbg}\"" + CFLAGS="${cc_oflag_dbg}" + else + ats_addto_bugger="${cc_oflag_dbg}" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in $CFLAGS; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to CFLAGS" + CFLAGS="$CFLAGS $i" + fi + done + fi + + + if test "x$CXXFLAGS" = "x"; then + test "x$verbose" = "xyes" && echo " setting CXXFLAGS to \"${cxx_oflag_dbg}\"" + CXXFLAGS="${cxx_oflag_dbg}" + else + ats_addto_bugger="${cxx_oflag_dbg}" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in $CXXFLAGS; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to CXXFLAGS" + CXXFLAGS="$CXXFLAGS $i" + fi + done + fi + + + if test "x$CPPFLAGS" = "x"; then + test "x$verbose" = "xyes" && echo " setting CPPFLAGS to \"-DDEBUG -D_DEBUG\"" + CPPFLAGS="-DDEBUG -D_DEBUG" + else + ats_addto_bugger="-DDEBUG -D_DEBUG" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in $CPPFLAGS; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to CPPFLAGS" + CPPFLAGS="$CPPFLAGS $i" + fi + done + fi + +else + + if test "x$CFLAGS" = "x"; then + test "x$verbose" = "xyes" && echo " setting CFLAGS to \"${cc_oflag_opt}\"" + CFLAGS="${cc_oflag_opt}" + else + ats_addto_bugger="${cc_oflag_opt}" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in $CFLAGS; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to CFLAGS" + 
CFLAGS="$CFLAGS $i" + fi + done + fi + + + if test "x$CXXFLAGS" = "x"; then + test "x$verbose" = "xyes" && echo " setting CXXFLAGS to \"${cxx_oflag_opt}\"" + CXXFLAGS="${cxx_oflag_opt}" + else + ats_addto_bugger="${cxx_oflag_opt}" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in $CXXFLAGS; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to CXXFLAGS" + CXXFLAGS="$CXXFLAGS $i" + fi + done + fi + +fi + +# Checks for pointer size +# The cast to long int works around a bug in the HP C Compiler +# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects +# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. +# This bug is HP SR number 8606223364. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of void*" >&5 +$as_echo_n "checking size of void*... " >&6; } +if ${ac_cv_sizeof_voidp+:} false; then : + $as_echo_n "(cached) " >&6 +else + if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (void*))" "ac_cv_sizeof_voidp" "$ac_includes_default"; then : + +else + if test "$ac_cv_type_voidp" = yes; then + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error 77 "cannot compute sizeof (void*) +See \`config.log' for more details" "$LINENO" 5; } + else + ac_cv_sizeof_voidp=0 + fi +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_voidp" >&5 +$as_echo "$ac_cv_sizeof_voidp" >&6; } + + + +cat >>confdefs.h <<_ACEOF +#define SIZEOF_VOIDP $ac_cv_sizeof_voidp +_ACEOF + + +if test "x$ac_cv_sizeof_voidp" == "x"; then + as_fn_error $? "Cannot determine size of void*" "$LINENO" 5 +fi + + + +# +# Here are all the extra linux-specific C(XX)FLAGS additions and +# so forth. 
+# TODO cpu architecture settings separate from operating system settings +# +cpu_architecture="" +# GCC: add a default march if there is not one set +if test "x${GCC}" = "xyes"; then + if test "${ac_cv_sizeof_voidp}" = "4"; then + case "$host_cpu" in + i?86* | k5-8* | pentium* | athlon) + cpu_architecture="-march=i586" + ;; + esac + else + case "$host_cpu" in + x86_64 | amd64) + # XXX: Any need for 64-bit arch flags? + # cpu_architecture="-march=native" + ;; + esac + fi +fi + +# Overrride detected architecture with the user suplied one +# + +# Check whether --with-architecture was given. +if test "${with_architecture+set}" = set; then : + withval=$with_architecture; + if test "x$withval" != "xyes" && test "x$withval" != "xno"; then + case "$withval" in + -*) + # TODO: In case we are cross compiling some of the provided flags + # should be added to the LDFLAGS + cpu_architecture="$withval" + ;; + *) + cpu_architecture="-march=$withval" + ;; + esac + elif test "x$withval" = "x"; then + as_fn_error $? 
"--with-architecture requires an param" "$LINENO" 5 + fi + +fi + + +if test "x$cpu_architecture" != "x"; then + + if test "x$CFLAGS" = "x"; then + test "x$verbose" = "xyes" && echo " setting CFLAGS to \"$cpu_architecture\"" + CFLAGS="$cpu_architecture" + else + ats_addto_bugger="$cpu_architecture" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in $CFLAGS; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to CFLAGS" + CFLAGS="$CFLAGS $i" + fi + done + fi + + + if test "x$CXXFLAGS" = "x"; then + test "x$verbose" = "xyes" && echo " setting CXXFLAGS to \"$cpu_architecture\"" + CXXFLAGS="$cpu_architecture" + else + ats_addto_bugger="$cpu_architecture" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in $CXXFLAGS; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to CXXFLAGS" + CXXFLAGS="$CXXFLAGS $i" + fi + done + fi + +fi + +# 64-bit LFS support +# + + if test "x$CPPFLAGS" = "x"; then + test "x$verbose" = "xyes" && echo " setting CPPFLAGS to \"-D_LARGEFILE64_SOURCE=1\"" + CPPFLAGS="-D_LARGEFILE64_SOURCE=1" + else + ats_addto_bugger="-D_LARGEFILE64_SOURCE=1" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in $CPPFLAGS; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to CPPFLAGS" + CPPFLAGS="$CPPFLAGS $i" + fi + done + fi + +if test "${ac_cv_sizeof_voidp}" = "8"; then + + if test "x$CPPFLAGS" = "x"; then + test "x$verbose" = "xyes" && echo " setting CPPFLAGS to \"-D_COMPILE64BIT_SOURCE=1\"" + CPPFLAGS="-D_COMPILE64BIT_SOURCE=1" + else + ats_addto_bugger="-D_COMPILE64BIT_SOURCE=1" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in 
$CPPFLAGS; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to CPPFLAGS" + CPPFLAGS="$CPPFLAGS $i" + fi + done + fi + +else + + if test "x$CPPFLAGS" = "x"; then + test "x$verbose" = "xyes" && echo " setting CPPFLAGS to \"-D_FILE_OFFSET_BITS=64\"" + CPPFLAGS="-D_FILE_OFFSET_BITS=64" + else + ats_addto_bugger="-D_FILE_OFFSET_BITS=64" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in $CPPFLAGS; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to CPPFLAGS" + CPPFLAGS="$CPPFLAGS $i" + fi + done + fi + +fi + + if test "x$CPPFLAGS" = "x"; then + test "x$verbose" = "xyes" && echo " setting CPPFLAGS to \"-D_GNU_SOURCE\"" + CPPFLAGS="-D_GNU_SOURCE" + else + ats_addto_bugger="-D_GNU_SOURCE" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in $CPPFLAGS; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to CPPFLAGS" + CPPFLAGS="$CPPFLAGS $i" + fi + done + fi + + + if test "x$CPPFLAGS" = "x"; then + test "x$verbose" = "xyes" && echo " setting CPPFLAGS to \"-D_REENTRANT\"" + CPPFLAGS="-D_REENTRANT" + else + ats_addto_bugger="-D_REENTRANT" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in $CPPFLAGS; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to CPPFLAGS" + CPPFLAGS="$CPPFLAGS $i" + fi + done + fi + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: Build for host OS: $host_os, arch: $host_cpu, optimization: $host_os_def" >&5 +$as_echo "$as_me: Build for host OS: $host_os, arch: $host_cpu, optimization: $host_os_def" >&6;} + +# +# 
Note: These are site-specific macro's that do various tests +# on the selected compilers. There was some tunning +# associated with our not wanting to use GNU for _everything_. +# Note: This macro may set certain parameters when run. +# + +# +# CPP 11 API +# TODO: briang, this needs to be translated to a define +# that can be used in the c++ api to enable c++11 api features. +# +#AC_MSG_CHECKING([whether to enable c++11 api]) +#AC_ARG_WITH([cpp11api], +# [AS_HELP_STRING([--with-cpp11api],[enable support for cpp11api [default=auto]])], +# [cpp11api=$withval], +# [cpp11api=$enable_cxx_11_support] +#) +#AC_MSG_RESULT([$cpp11api]) +#AM_CONDITIONAL([BUILD_CPP11API], [test "$cpp11api" = yes]) + +# Check for ccache (if explicitly enabled) +if test "x$enable_ccache" = "xyes"; then + # Extract the first word of "ccache", so it can be a program name with args. +set dummy ccache; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_CCACHE+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$CCACHE"; then + ac_cv_prog_CCACHE="$CCACHE" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_CCACHE="ccache" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +CCACHE=$ac_cv_prog_CCACHE +if test -n "$CCACHE"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CCACHE" >&5 +$as_echo "$CCACHE" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + if test "x${CCACHE}" = "xccache"; then + CC="$CCACHE $CC" + CXX="$CCACHE $CXX" + fi +fi + +# ----------------------------------------------------------------------------- +# 4. 
CHECK FOR LIBRARIES + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing exc_capture_context" >&5 +$as_echo_n "checking for library containing exc_capture_context... " >&6; } +if ${ac_cv_search_exc_capture_context+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_func_search_save_LIBS=$LIBS +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char exc_capture_context (); +int +main () +{ +return exc_capture_context (); + ; + return 0; +} +_ACEOF +for ac_lib in '' exc; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + if ac_fn_c_try_link "$LINENO"; then : + ac_cv_search_exc_capture_context=$ac_res +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext + if ${ac_cv_search_exc_capture_context+:} false; then : + break +fi +done +if ${ac_cv_search_exc_capture_context+:} false; then : + +else + ac_cv_search_exc_capture_context=no +fi +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_exc_capture_context" >&5 +$as_echo "$ac_cv_search_exc_capture_context" >&6; } +ac_res=$ac_cv_search_exc_capture_context +if test "$ac_res" != no; then : + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" + +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing MLD_demangle_string" >&5 +$as_echo_n "checking for library containing MLD_demangle_string... " >&6; } +if ${ac_cv_search_MLD_demangle_string+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_func_search_save_LIBS=$LIBS +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. 
+ Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char MLD_demangle_string (); +int +main () +{ +return MLD_demangle_string (); + ; + return 0; +} +_ACEOF +for ac_lib in '' mld; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + if ac_fn_c_try_link "$LINENO"; then : + ac_cv_search_MLD_demangle_string=$ac_res +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext + if ${ac_cv_search_MLD_demangle_string+:} false; then : + break +fi +done +if ${ac_cv_search_MLD_demangle_string+:} false; then : + +else + ac_cv_search_MLD_demangle_string=no +fi +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_MLD_demangle_string" >&5 +$as_echo "$ac_cv_search_MLD_demangle_string" >&6; } +ac_res=$ac_cv_search_MLD_demangle_string +if test "$ac_res" != no; then : + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" + +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing dlopen" >&5 +$as_echo_n "checking for library containing dlopen... " >&6; } +if ${ac_cv_search_dlopen+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_func_search_save_LIBS=$LIBS +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char dlopen (); +int +main () +{ +return dlopen (); + ; + return 0; +} +_ACEOF +for ac_lib in '' dl; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + if ac_fn_c_try_link "$LINENO"; then : + ac_cv_search_dlopen=$ac_res +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext + if ${ac_cv_search_dlopen+:} false; then : + break +fi +done +if ${ac_cv_search_dlopen+:} false; then : + +else + ac_cv_search_dlopen=no +fi +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_dlopen" >&5 +$as_echo "$ac_cv_search_dlopen" >&6; } +ac_res=$ac_cv_search_dlopen +if test "$ac_res" != no; then : + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" + +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing socket" >&5 +$as_echo_n "checking for library containing socket... " >&6; } +if ${ac_cv_search_socket+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_func_search_save_LIBS=$LIBS +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char socket (); +int +main () +{ +return socket (); + ; + return 0; +} +_ACEOF +for ac_lib in '' socket; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + if ac_fn_c_try_link "$LINENO"; then : + ac_cv_search_socket=$ac_res +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext + if ${ac_cv_search_socket+:} false; then : + break +fi +done +if ${ac_cv_search_socket+:} false; then : + +else + ac_cv_search_socket=no +fi +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_socket" >&5 +$as_echo "$ac_cv_search_socket" >&6; } +ac_res=$ac_cv_search_socket +if test "$ac_res" != no; then : + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" + +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing gethostbyname" >&5 +$as_echo_n "checking for library containing gethostbyname... " >&6; } +if ${ac_cv_search_gethostbyname+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_func_search_save_LIBS=$LIBS +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char gethostbyname (); +int +main () +{ +return gethostbyname (); + ; + return 0; +} +_ACEOF +for ac_lib in '' nsl; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + if ac_fn_c_try_link "$LINENO"; then : + ac_cv_search_gethostbyname=$ac_res +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext + if ${ac_cv_search_gethostbyname+:} false; then : + break +fi +done +if ${ac_cv_search_gethostbyname+:} false; then : + +else + ac_cv_search_gethostbyname=no +fi +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_gethostbyname" >&5 +$as_echo "$ac_cv_search_gethostbyname" >&6; } +ac_res=$ac_cv_search_gethostbyname +if test "$ac_res" != no; then : + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" + +fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing pthread_create" >&5 +$as_echo_n "checking for library containing pthread_create... " >&6; } +if ${ac_cv_search_pthread_create+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_func_search_save_LIBS=$LIBS +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char pthread_create (); +int +main () +{ +return pthread_create (); + ; + return 0; +} +_ACEOF +for ac_lib in '' pthread; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + if ac_fn_c_try_link "$LINENO"; then : + ac_cv_search_pthread_create=$ac_res +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext + if ${ac_cv_search_pthread_create+:} false; then : + break +fi +done +if ${ac_cv_search_pthread_create+:} false; then : + +else + ac_cv_search_pthread_create=no +fi +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_pthread_create" >&5 +$as_echo "$ac_cv_search_pthread_create" >&6; } +ac_res=$ac_cv_search_pthread_create +if test "$ac_res" != no; then : + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" + +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing clock_gettime" >&5 +$as_echo_n "checking for library containing clock_gettime... " >&6; } +if ${ac_cv_search_clock_gettime+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_func_search_save_LIBS=$LIBS +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char clock_gettime (); +int +main () +{ +return clock_gettime (); + ; + return 0; +} +_ACEOF +for ac_lib in '' rt; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + if ac_fn_c_try_link "$LINENO"; then : + ac_cv_search_clock_gettime=$ac_res +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext + if ${ac_cv_search_clock_gettime+:} false; then : + break +fi +done +if ${ac_cv_search_clock_gettime+:} false; then : + +else + ac_cv_search_clock_gettime=no +fi +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_clock_gettime" >&5 +$as_echo "$ac_cv_search_clock_gettime" >&6; } +ac_res=$ac_cv_search_clock_gettime +if test "$ac_res" != no; then : + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" + +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing clock_gettime" >&5 +$as_echo_n "checking for library containing clock_gettime... " >&6; } +if ${ac_cv_search_clock_gettime+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_func_search_save_LIBS=$LIBS +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char clock_gettime (); +int +main () +{ +return clock_gettime (); + ; + return 0; +} +_ACEOF +for ac_lib in '' posix4; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + if ac_fn_c_try_link "$LINENO"; then : + ac_cv_search_clock_gettime=$ac_res +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext + if ${ac_cv_search_clock_gettime+:} false; then : + break +fi +done +if ${ac_cv_search_clock_gettime+:} false; then : + +else + ac_cv_search_clock_gettime=no +fi +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_clock_gettime" >&5 +$as_echo "$ac_cv_search_clock_gettime" >&6; } +ac_res=$ac_cv_search_clock_gettime +if test "$ac_res" != no; then : + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" + +fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for res_init in -lresolv" >&5 +$as_echo_n "checking for res_init in -lresolv... " >&6; } +if ${ac_cv_lib_resolv_res_init+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lresolv $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char res_init (); +int +main () +{ +return res_init (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_resolv_res_init=yes +else + ac_cv_lib_resolv_res_init=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_resolv_res_init" >&5 +$as_echo "$ac_cv_lib_resolv_res_init" >&6; } +if test "x$ac_cv_lib_resolv_res_init" = xyes; then : + LIBRESOLV="-lresolv" + +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for __putlong in -lresolv" >&5 +$as_echo_n "checking for __putlong in -lresolv... " >&6; } +if ${ac_cv_lib_resolv___putlong+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lresolv $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char __putlong (); +int +main () +{ +return __putlong (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_resolv___putlong=yes +else + ac_cv_lib_resolv___putlong=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_resolv___putlong" >&5 +$as_echo "$ac_cv_lib_resolv___putlong" >&6; } +if test "x$ac_cv_lib_resolv___putlong" = xyes; then : + LIBRESOLV="-lresolv" + +fi + + +# Test for ncurses. We need to turn off -Werror because the C code in the +# ncurses compile tests does not generate unused variable warnings. 
+__saved_CFLAGS="$CFLAGS" + + if test "x$CFLAGS" = "x-Werror"; then + test "x$verbose" = "xyes" && echo " nulling CFLAGS" + CFLAGS="" + else + ats_new_bugger="" + ats_removed=0 + for i in $CFLAGS; do + if test "x$i" != "x-Werror"; then + ats_new_bugger="$ats_new_bugger $i" + else + ats_removed=1 + fi + done + if test $ats_removed = "1"; then + test "x$verbose" = "xyes" && echo " removed \"-Werror\" from CFLAGS" + CFLAGS=$ats_new_bugger + fi + fi + + + + +# Check whether --with-ncurses was given. +if test "${with_ncurses+set}" = set; then : + withval=$with_ncurses; +else + with_ncurses=check +fi + + +# Check whether --with-ncursesw was given. +if test "${with_ncursesw+set}" = set; then : + withval=$with_ncursesw; +else + with_ncursesw=check +fi + + + ax_saved_LIBS=$LIBS + if test "x$with_ncurses" = xyes || test "x$with_ncursesw" = xyes; then : + ax_with_plaincurses=no +else + ax_with_plaincurses=check +fi + + ax_cv_curses_which=no + + # Test for NcursesW + + if test "x$CURSES_LIB" = x && test "x$with_ncursesw" != xno; then : + + LIBS="$ax_saved_LIBS -lncursesw" + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for NcursesW wide-character library" >&5 +$as_echo_n "checking for NcursesW wide-character library... " >&6; } +if ${ax_cv_ncursesw+:} false; then : + $as_echo_n "(cached) " >&6 +else + + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char initscr (); +int +main () +{ +return initscr (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ax_cv_ncursesw=yes +else + ax_cv_ncursesw=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_ncursesw" >&5 +$as_echo "$ax_cv_ncursesw" >&6; } + if test "x$ax_cv_ncursesw" = xno && test "x$with_ncursesw" = xyes; then : + + as_fn_error $? "--with-ncursesw specified but could not find NcursesW library" "$LINENO" 5 + +fi + + if test "x$ax_cv_ncursesw" = xyes; then : + + ax_cv_curses=yes + ax_cv_curses_which=ncursesw + CURSES_LIB="-lncursesw" + +$as_echo "#define HAVE_NCURSESW 1" >>confdefs.h + + +$as_echo "#define HAVE_CURSES 1" >>confdefs.h + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for working ncursesw/curses.h" >&5 +$as_echo_n "checking for working ncursesw/curses.h... " >&6; } +if ${ax_cv_header_ncursesw_curses_h+:} false; then : + $as_echo_n "(cached) " >&6 +else + + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + + #define _XOPEN_SOURCE_EXTENDED 1 + #include + +int +main () +{ + + chtype a = A_BOLD; + int b = KEY_LEFT; + chtype c = COLOR_PAIR(1) & A_COLOR; + attr_t d = WA_NORMAL; + cchar_t e; + wint_t f; + int g = getattrs(stdscr); + int h = getcurx(stdscr) + getmaxx(stdscr); + initscr(); + init_pair(1, COLOR_WHITE, COLOR_RED); + wattr_set(stdscr, d, 0, NULL); + wget_wch(stdscr, &f); + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ax_cv_header_ncursesw_curses_h=yes +else + ax_cv_header_ncursesw_curses_h=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_header_ncursesw_curses_h" >&5 +$as_echo "$ax_cv_header_ncursesw_curses_h" >&6; } + if test "x$ax_cv_header_ncursesw_curses_h" = xyes; then : + + ax_cv_curses_enhanced=yes + ax_cv_curses_color=yes + ax_cv_curses_obsolete=yes + +$as_echo "#define HAVE_CURSES_ENHANCED 1" >>confdefs.h + + +$as_echo "#define HAVE_CURSES_COLOR 1" >>confdefs.h + + +$as_echo "#define HAVE_CURSES_OBSOLETE 1" >>confdefs.h + + +$as_echo "#define HAVE_NCURSESW_CURSES_H 1" >>confdefs.h + + +fi + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for working ncursesw.h" >&5 +$as_echo_n "checking for working ncursesw.h... " >&6; } +if ${ax_cv_header_ncursesw_h+:} false; then : + $as_echo_n "(cached) " >&6 +else + + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + + #define _XOPEN_SOURCE_EXTENDED 1 + #include + +int +main () +{ + + chtype a = A_BOLD; + int b = KEY_LEFT; + chtype c = COLOR_PAIR(1) & A_COLOR; + attr_t d = WA_NORMAL; + cchar_t e; + wint_t f; + int g = getattrs(stdscr); + int h = getcurx(stdscr) + getmaxx(stdscr); + initscr(); + init_pair(1, COLOR_WHITE, COLOR_RED); + wattr_set(stdscr, d, 0, NULL); + wget_wch(stdscr, &f); + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ax_cv_header_ncursesw_h=yes +else + ax_cv_header_ncursesw_h=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_header_ncursesw_h" >&5 +$as_echo "$ax_cv_header_ncursesw_h" >&6; } + if test "x$ax_cv_header_ncursesw_h" = xyes; then : + + ax_cv_curses_enhanced=yes + ax_cv_curses_color=yes + ax_cv_curses_obsolete=yes + +$as_echo "#define HAVE_CURSES_ENHANCED 1" >>confdefs.h + + +$as_echo "#define HAVE_CURSES_COLOR 1" >>confdefs.h + + +$as_echo "#define HAVE_CURSES_OBSOLETE 1" >>confdefs.h + + +$as_echo "#define HAVE_NCURSESW_H 1" >>confdefs.h + + +fi + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for working ncurses.h" >&5 +$as_echo_n "checking for working ncurses.h... " >&6; } +if ${ax_cv_header_ncurses_h_with_ncursesw+:} false; then : + $as_echo_n "(cached) " >&6 +else + + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + + #define _XOPEN_SOURCE_EXTENDED 1 + #include + +int +main () +{ + + chtype a = A_BOLD; + int b = KEY_LEFT; + chtype c = COLOR_PAIR(1) & A_COLOR; + attr_t d = WA_NORMAL; + cchar_t e; + wint_t f; + int g = getattrs(stdscr); + int h = getcurx(stdscr) + getmaxx(stdscr); + initscr(); + init_pair(1, COLOR_WHITE, COLOR_RED); + wattr_set(stdscr, d, 0, NULL); + wget_wch(stdscr, &f); + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ax_cv_header_ncurses_h_with_ncursesw=yes +else + ax_cv_header_ncurses_h_with_ncursesw=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_header_ncurses_h_with_ncursesw" >&5 +$as_echo "$ax_cv_header_ncurses_h_with_ncursesw" >&6; } + if test "x$ax_cv_header_ncurses_h_with_ncursesw" = xyes; then : + + ax_cv_curses_enhanced=yes + ax_cv_curses_color=yes + ax_cv_curses_obsolete=yes + +$as_echo "#define HAVE_CURSES_ENHANCED 1" >>confdefs.h + + +$as_echo "#define HAVE_CURSES_COLOR 1" >>confdefs.h + + +$as_echo "#define HAVE_CURSES_OBSOLETE 1" >>confdefs.h + + +$as_echo "#define HAVE_NCURSES_H 1" >>confdefs.h + + +fi + + if test "x$ax_cv_header_ncursesw_curses_h" = xno && test "x$ax_cv_header_ncursesw_h" = xno && test "x$ax_cv_header_ncurses_h_with_ncursesw" = xno; then : + + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: could not find a working ncursesw/curses.h, ncursesw.h or ncurses.h" >&5 +$as_echo "$as_me: WARNING: could not find a working ncursesw/curses.h, ncursesw.h or ncurses.h" >&2;} + +fi + +fi + +fi + + # Test for Ncurses + + if test "x$CURSES_LIB" = x && test "x$with_ncurses" != xno && test "x$ax_cv_curses_which" = xno; then : + + LIBS="$ax_saved_LIBS -lncurses" + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Ncurses library" >&5 +$as_echo_n "checking for Ncurses library... 
" >&6; } +if ${ax_cv_ncurses+:} false; then : + $as_echo_n "(cached) " >&6 +else + + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char initscr (); +int +main () +{ +return initscr (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ax_cv_ncurses=yes +else + ax_cv_ncurses=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_ncurses" >&5 +$as_echo "$ax_cv_ncurses" >&6; } + if test "x$ax_cv_ncurses" = xno && test "x$with_ncurses" = xyes; then : + + as_fn_error $? "--with-ncurses specified but could not find Ncurses library" "$LINENO" 5 + +fi + + if test "x$ax_cv_ncurses" = xyes; then : + + ax_cv_curses=yes + ax_cv_curses_which=ncurses + CURSES_LIB="-lncurses" + +$as_echo "#define HAVE_NCURSES 1" >>confdefs.h + + +$as_echo "#define HAVE_CURSES 1" >>confdefs.h + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for working ncurses/curses.h" >&5 +$as_echo_n "checking for working ncurses/curses.h... " >&6; } +if ${ax_cv_header_ncurses_curses_h+:} false; then : + $as_echo_n "(cached) " >&6 +else + + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + + #include + +int +main () +{ + + chtype a = A_BOLD; + int b = KEY_LEFT; + chtype c = COLOR_PAIR(1) & A_COLOR; + int g = getattrs(stdscr); + int h = getcurx(stdscr) + getmaxx(stdscr); + initscr(); + init_pair(1, COLOR_WHITE, COLOR_RED); + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ax_cv_header_ncurses_curses_h=yes +else + ax_cv_header_ncurses_curses_h=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_header_ncurses_curses_h" >&5 +$as_echo "$ax_cv_header_ncurses_curses_h" >&6; } + if test "x$ax_cv_header_ncurses_curses_h" = xyes; then : + + ax_cv_curses_color=yes + ax_cv_curses_obsolete=yes + +$as_echo "#define HAVE_CURSES_COLOR 1" >>confdefs.h + + +$as_echo "#define HAVE_CURSES_OBSOLETE 1" >>confdefs.h + + +$as_echo "#define HAVE_NCURSES_CURSES_H 1" >>confdefs.h + + +fi + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for working ncurses.h" >&5 +$as_echo_n "checking for working ncurses.h... " >&6; } +if ${ax_cv_header_ncurses_h+:} false; then : + $as_echo_n "(cached) " >&6 +else + + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + + #include + +int +main () +{ + + chtype a = A_BOLD; + int b = KEY_LEFT; + chtype c = COLOR_PAIR(1) & A_COLOR; + int g = getattrs(stdscr); + int h = getcurx(stdscr) + getmaxx(stdscr); + initscr(); + init_pair(1, COLOR_WHITE, COLOR_RED); + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ax_cv_header_ncurses_h=yes +else + ax_cv_header_ncurses_h=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_header_ncurses_h" >&5 +$as_echo "$ax_cv_header_ncurses_h" >&6; } + if test "x$ax_cv_header_ncurses_h" = xyes; then : + + ax_cv_curses_color=yes + ax_cv_curses_obsolete=yes + +$as_echo "#define HAVE_CURSES_COLOR 1" >>confdefs.h + + +$as_echo "#define HAVE_CURSES_OBSOLETE 1" >>confdefs.h + + +$as_echo "#define HAVE_NCURSES_H 1" >>confdefs.h + + +fi + + if test "x$ax_cv_header_ncurses_curses_h" = xno && test "x$ax_cv_header_ncurses_h" = xno; then : + + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: could not find a working ncurses/curses.h or ncurses.h" >&5 +$as_echo "$as_me: WARNING: could not find a working ncurses/curses.h or ncurses.h" >&2;} + +fi + +fi + +fi + + # Test for plain Curses (or if CURSES_LIB was set by user) + + if test "x$with_plaincurses" != xno && test "x$ax_cv_curses_which" = xno; then : + + if test "x$CURSES_LIB" != x; then : + + LIBS="$ax_saved_LIBS $CURSES_LIB" + +else + + LIBS="$ax_saved_LIBS -lcurses" + +fi + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Curses library" >&5 +$as_echo_n "checking for Curses library... " >&6; } +if ${ax_cv_plaincurses+:} false; then : + $as_echo_n "(cached) " >&6 +else + + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char initscr (); +int +main () +{ +return initscr (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ax_cv_plaincurses=yes +else + ax_cv_plaincurses=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_plaincurses" >&5 +$as_echo "$ax_cv_plaincurses" >&6; } + + if test "x$ax_cv_plaincurses" = xyes; then : + + ax_cv_curses=yes + ax_cv_curses_which=plaincurses + if test "x$CURSES_LIB" = x; then : + + CURSES_LIB="-lcurses" + +fi + +$as_echo "#define HAVE_CURSES 1" >>confdefs.h + + + # Check for base conformance (and header file) + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for working curses.h" >&5 +$as_echo_n "checking for working curses.h... " >&6; } +if ${ax_cv_header_curses_h+:} false; then : + $as_echo_n "(cached) " >&6 +else + + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + #include + +int +main () +{ + + chtype a = A_BOLD; + int b = KEY_LEFT; + initscr(); + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ax_cv_header_curses_h=yes +else + ax_cv_header_curses_h=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_header_curses_h" >&5 +$as_echo "$ax_cv_header_curses_h" >&6; } + if test "x$ax_cv_header_curses_h" = xyes; then : + + +$as_echo "#define HAVE_CURSES_H 1" >>confdefs.h + + + # Check for X/Open Enhanced conformance + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for X/Open Enhanced Curses conformance" >&5 +$as_echo_n "checking for X/Open Enhanced Curses conformance... " >&6; } +if ${ax_cv_plaincurses_enhanced+:} false; then : + $as_echo_n "(cached) " >&6 +else + + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + + #define _XOPEN_SOURCE_EXTENDED 1 + #include + #ifndef _XOPEN_CURSES + #error "this Curses library is not enhanced" + "this Curses library is not enhanced" + #endif + +int +main () +{ + + chtype a = A_BOLD; + int b = KEY_LEFT; + chtype c = COLOR_PAIR(1) & A_COLOR; + attr_t d = WA_NORMAL; + cchar_t e; + wint_t f; + initscr(); + init_pair(1, COLOR_WHITE, COLOR_RED); + wattr_set(stdscr, d, 0, NULL); + wget_wch(stdscr, &f); + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ax_cv_plaincurses_enhanced=yes +else + ax_cv_plaincurses_enhanced=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_plaincurses_enhanced" >&5 +$as_echo "$ax_cv_plaincurses_enhanced" >&6; } + if test "x$ax_cv_plaincurses_enhanced" = xyes; then : + + ax_cv_curses_enhanced=yes + ax_cv_curses_color=yes + +$as_echo "#define HAVE_CURSES_ENHANCED 1" >>confdefs.h + + +$as_echo "#define HAVE_CURSES_COLOR 1" >>confdefs.h + + +fi + + # Check for color functions + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Curses color functions" >&5 +$as_echo_n "checking for Curses color functions... " >&6; } +if ${ax_cv_plaincurses_color+:} false; then : + $as_echo_n "(cached) " >&6 +else + + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + + #define _XOPEN_SOURCE_EXTENDED 1 + #include + +int +main () +{ + + chtype a = A_BOLD; + int b = KEY_LEFT; + chtype c = COLOR_PAIR(1) & A_COLOR; + initscr(); + init_pair(1, COLOR_WHITE, COLOR_RED); + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ax_cv_plaincurses_color=yes +else + ax_cv_plaincurses_color=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_plaincurses_color" >&5 +$as_echo "$ax_cv_plaincurses_color" >&6; } + if test "x$ax_cv_plaincurses_color" = xyes; then : + + ax_cv_curses_color=yes + +$as_echo "#define HAVE_CURSES_COLOR 1" >>confdefs.h + + +fi + + # Check for obsolete functions + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for obsolete Curses functions" >&5 +$as_echo_n "checking for obsolete Curses functions... " >&6; } +if ${ax_cv_plaincurses_obsolete+:} false; then : + $as_echo_n "(cached) " >&6 +else + + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + + #include + +int +main () +{ + + chtype a = A_BOLD; + int b = KEY_LEFT; + int g = getattrs(stdscr); + int h = getcurx(stdscr) + getmaxx(stdscr); + initscr(); + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ax_cv_plaincurses_obsolete=yes +else + ax_cv_plaincurses_obsolete=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_plaincurses_obsolete" >&5 +$as_echo "$ax_cv_plaincurses_obsolete" >&6; } + if test "x$ax_cv_plaincurses_obsolete" = xyes; then : + + ax_cv_curses_obsolete=yes + +$as_echo "#define HAVE_CURSES_OBSOLETE 1" >>confdefs.h + + +fi + +fi + + if test "x$ax_cv_header_curses_h" = xno; then : + + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: could not find a working curses.h" >&5 +$as_echo "$as_me: WARNING: could not find a working curses.h" >&2;} + +fi + +fi + +fi + + if test "x$ax_cv_curses" != xyes; then : + ax_cv_curses=no +fi + if test "x$ax_cv_curses_enhanced" != xyes; then : + ax_cv_curses_enhanced=no +fi + if test "x$ax_cv_curses_color" != xyes; then : + ax_cv_curses_color=no +fi + if test "x$ax_cv_curses_obsolete" != xyes; then : + ax_cv_curses_obsolete=no +fi + + LIBS=$ax_saved_LIBS + +CFLAGS="$__saved_CFLAGS" + + + + + + + +# Check whether --with-curl-prefix was given. +if test "${with_curl_prefix+set}" = set; then : + withval=$with_curl_prefix; curl_config_prefix="$withval" +else + curl_config_prefix="" +fi + + +# Check whether --with-curl-exec-prefix was given. 
+if test "${with_curl_exec_prefix+set}" = set; then : + withval=$with_curl_exec_prefix; curl_config_exec_prefix="$withval" +else + curl_config_exec_prefix="" +fi + + + + + + + if test x$CURL_CFLAGS != x -o x$CURL_LIBS != x; then : + + + + : + + if test "x$ax_cv_curses" = "xyes"; then + BUILD_TSTOP_TRUE= + BUILD_TSTOP_FALSE='#' +else + BUILD_TSTOP_TRUE='#' + BUILD_TSTOP_FALSE= +fi + + + +else + + if test x$curl_config_exec_prefix != x; then : + + curl_config_args="$curl_config_args --exec-prefix=$curl_config_exec_prefix" + if test x${CURL_CONFIG+set} != xset; then : + + CURL_CONFIG=$curl_config_exec_prefix/bin/curl-config + +fi + +fi + if test x$curl_config_prefix != x; then : + + curl_config_args="$curl_config_args --prefix=$curl_config_prefix" + if test x${CURL_CONFIG+set} != xset; then : + + CURL_CONFIG=$curl_config_prefix/bin/curl-config + +fi + +fi + + for ac_prog in curl-config +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_path_CURL_CONFIG+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $CURL_CONFIG in + [\\/]* | ?:[\\/]*) + ac_cv_path_CURL_CONFIG="$CURL_CONFIG" # Let the user override the test with a path. + ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_path_CURL_CONFIG="$as_dir/$ac_word$ac_exec_ext" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + ;; +esac +fi +CURL_CONFIG=$ac_cv_path_CURL_CONFIG +if test -n "$CURL_CONFIG"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CURL_CONFIG" >&5 +$as_echo "$CURL_CONFIG" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$CURL_CONFIG" && break +done +test -n "$CURL_CONFIG" || CURL_CONFIG="no" + + if test "$CURL_CONFIG" == "no"; then : + + : + + if false; then + BUILD_TSTOP_TRUE= + BUILD_TSTOP_FALSE='#' +else + BUILD_TSTOP_TRUE='#' + BUILD_TSTOP_FALSE= +fi + + + + +else + + if test x"" == x; then : + + CURL_CFLAGS="`$CURL_CONFIG $curl_config_args --cflags`" + +else + + CURL_CFLAGS="`$CURL_CONFIG $curl_config_args `" + +fi + + if test x"" == x; then : + + CURL_LIBS="`$CURL_CONFIG $curl_config_args --libs`" + +else + + CURL_LIBS="`$CURL_CONFIG $curl_config_args `" + +fi + + if test x"7.19" != x; then : + + if test x"'s/^libcurl\ \+//'" != x; then : + + curl_version="`$CURL_CONFIG $curl_config_args --version | $SED -e 's/^libcurl\ \+//'`" + +else + + curl_version="`$CURL_CONFIG $curl_config_args --version | $SED -e 's/^\ *\(.*\)\ *$/\1/'`" + +fi + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for curl ($curl_version) >= 7.19" >&5 +$as_echo_n "checking for curl ($curl_version) >= 7.19... " >&6; } + + + + # Used to indicate true or false condition + ax_compare_version=false + + # Convert the two version strings to be compared into a format that + # allows a simple string comparison. The end result is that a version + # string of the form 1.12.5-r617 will be converted to the form + # 0001001200050617. In other words, each number is zero padded to four + # digits, and non digits are removed. 
+ + ax_compare_version_A=`echo "$curl_version" | sed -e 's/\([0-9]*\)/Z\1Z/g' \ + -e 's/Z\([0-9]\)Z/Z0\1Z/g' \ + -e 's/Z\([0-9][0-9]\)Z/Z0\1Z/g' \ + -e 's/Z\([0-9][0-9][0-9]\)Z/Z0\1Z/g' \ + -e 's/[^0-9]//g'` + + + ax_compare_version_B=`echo "7.19" | sed -e 's/\([0-9]*\)/Z\1Z/g' \ + -e 's/Z\([0-9]\)Z/Z0\1Z/g' \ + -e 's/Z\([0-9][0-9]\)Z/Z0\1Z/g' \ + -e 's/Z\([0-9][0-9][0-9]\)Z/Z0\1Z/g' \ + -e 's/[^0-9]//g'` + + + ax_compare_version=`echo "x$ax_compare_version_A +x$ax_compare_version_B" | sed 's/^ *//' | sort -r | sed "s/x${ax_compare_version_A}/true/;s/x${ax_compare_version_B}/false/;1q"` + + + + if test "$ax_compare_version" = "true" ; then + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + + + + : + + if test "x$ax_cv_curses" = "xyes"; then + BUILD_TSTOP_TRUE= + BUILD_TSTOP_FALSE='#' +else + BUILD_TSTOP_TRUE='#' + BUILD_TSTOP_FALSE= +fi + + + + else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + : + + if false; then + BUILD_TSTOP_TRUE= + BUILD_TSTOP_FALSE='#' +else + BUILD_TSTOP_TRUE='#' + BUILD_TSTOP_FALSE= +fi + + + + + fi + + +else + + + + : + + if test "x$ax_cv_curses" = "xyes"; then + BUILD_TSTOP_TRUE= + BUILD_TSTOP_FALSE='#' +else + BUILD_TSTOP_TRUE='#' + BUILD_TSTOP_FALSE= +fi + + + +fi + +fi + +fi + + + + + + +# +# The header_rewrite module depends on boost. + + +# Check whether --with-boost was given. +if test "${with_boost+set}" = set; then : + withval=$with_boost; + if test "$withval" = "no"; then + want_boost="no" + elif test "$withval" = "yes"; then + want_boost="yes" + ac_boost_path="" + else + want_boost="yes" + ac_boost_path="$withval" + fi + +else + want_boost="yes" +fi + + + + +# Check whether --with-boost-libdir was given. +if test "${with_boost_libdir+set}" = set; then : + withval=$with_boost_libdir; + if test -d "$withval" + then + ac_boost_lib_path="$withval" + else + as_fn_error $? 
"--with-boost-libdir expected directory name" "$LINENO" 5 + fi + +else + ac_boost_lib_path="" + +fi + + +if test "x$want_boost" = "xyes"; then + boost_lib_version_req=1.33 + boost_lib_version_req_shorten=`expr $boost_lib_version_req : '\([0-9]*\.[0-9]*\)'` + boost_lib_version_req_major=`expr $boost_lib_version_req : '\([0-9]*\)'` + boost_lib_version_req_minor=`expr $boost_lib_version_req : '[0-9]*\.\([0-9]*\)'` + boost_lib_version_req_sub_minor=`expr $boost_lib_version_req : '[0-9]*\.[0-9]*\.\([0-9]*\)'` + if test "x$boost_lib_version_req_sub_minor" = "x" ; then + boost_lib_version_req_sub_minor="0" + fi + WANT_BOOST_VERSION=`expr $boost_lib_version_req_major \* 100000 \+ $boost_lib_version_req_minor \* 100 \+ $boost_lib_version_req_sub_minor` + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for boostlib >= $boost_lib_version_req" >&5 +$as_echo_n "checking for boostlib >= $boost_lib_version_req... " >&6; } + succeeded=no + + libsubdirs="lib" + ax_arch=`uname -m` + if test $ax_arch = x86_64 -o $ax_arch = ppc64 -o $ax_arch = s390x -o $ax_arch = sparc64; then + libsubdirs="lib64 lib lib64" + fi + + if test "$ac_boost_path" != ""; then + BOOST_CPPFLAGS="-I$ac_boost_path/include" + for ac_boost_path_tmp in $libsubdirs; do + if test -d "$ac_boost_path"/"$ac_boost_path_tmp" ; then + BOOST_LDFLAGS="-L$ac_boost_path/$ac_boost_path_tmp" + break + fi + done + elif test "$cross_compiling" != yes; then + for ac_boost_path_tmp in /usr /usr/local /opt /opt/local ; do + if test -d "$ac_boost_path_tmp/include/boost" && test -r "$ac_boost_path_tmp/include/boost"; then + for libsubdir in $libsubdirs ; do + if ls "$ac_boost_path_tmp/$libsubdir/libboost_"* >/dev/null 2>&1 ; then break; fi + done + BOOST_LDFLAGS="-L$ac_boost_path_tmp/$libsubdir" + BOOST_CPPFLAGS="-I$ac_boost_path_tmp/include" + break; + fi + done + fi + + if test "$ac_boost_lib_path" != ""; then + BOOST_LDFLAGS="-L$ac_boost_lib_path" + fi + + CPPFLAGS_SAVED="$CPPFLAGS" + CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS" + export 
CPPFLAGS + + LDFLAGS_SAVED="$LDFLAGS" + LDFLAGS="$LDFLAGS $BOOST_LDFLAGS" + export LDFLAGS + + + ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + #include + +int +main () +{ + + #if BOOST_VERSION >= $WANT_BOOST_VERSION + // Everything is okay + #else + # error Boost version is too old + #endif + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + succeeded=yes + found_system=yes + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + + + + if test "x$succeeded" != "xyes"; then + _version=0 + if test "$ac_boost_path" != ""; then + if test -d "$ac_boost_path" && test -r "$ac_boost_path"; then + for i in `ls -d $ac_boost_path/include/boost-* 2>/dev/null`; do + _version_tmp=`echo $i | sed "s#$ac_boost_path##" | sed 's/\/include\/boost-//' | sed 's/_/./'` + V_CHECK=`expr $_version_tmp \> $_version` + if test "$V_CHECK" = "1" ; then + _version=$_version_tmp + fi + VERSION_UNDERSCORE=`echo $_version | sed 's/\./_/'` + BOOST_CPPFLAGS="-I$ac_boost_path/include/boost-$VERSION_UNDERSCORE" + done + fi + else + if test "$cross_compiling" != yes; then + for ac_boost_path in /usr /usr/local /opt /opt/local ; do + if test -d "$ac_boost_path" && test -r "$ac_boost_path"; then + for i in `ls -d $ac_boost_path/include/boost-* 2>/dev/null`; do + _version_tmp=`echo $i | sed "s#$ac_boost_path##" | sed 's/\/include\/boost-//' | sed 's/_/./'` + V_CHECK=`expr $_version_tmp \> $_version` + 
if test "$V_CHECK" = "1" ; then + _version=$_version_tmp + best_path=$ac_boost_path + fi + done + fi + done + + VERSION_UNDERSCORE=`echo $_version | sed 's/\./_/'` + BOOST_CPPFLAGS="-I$best_path/include/boost-$VERSION_UNDERSCORE" + if test "$ac_boost_lib_path" = ""; then + for libsubdir in $libsubdirs ; do + if ls "$best_path/$libsubdir/libboost_"* >/dev/null 2>&1 ; then break; fi + done + BOOST_LDFLAGS="-L$best_path/$libsubdir" + fi + fi + + if test "x$BOOST_ROOT" != "x"; then + for libsubdir in $libsubdirs ; do + if ls "$BOOST_ROOT/stage/$libsubdir/libboost_"* >/dev/null 2>&1 ; then break; fi + done + if test -d "$BOOST_ROOT" && test -r "$BOOST_ROOT" && test -d "$BOOST_ROOT/stage/$libsubdir" && test -r "$BOOST_ROOT/stage/$libsubdir"; then + version_dir=`expr //$BOOST_ROOT : '.*/\(.*\)'` + stage_version=`echo $version_dir | sed 's/boost_//' | sed 's/_/./g'` + stage_version_shorten=`expr $stage_version : '\([0-9]*\.[0-9]*\)'` + V_CHECK=`expr $stage_version_shorten \>\= $_version` + if test "$V_CHECK" = "1" -a "$ac_boost_lib_path" = "" ; then + { $as_echo "$as_me:${as_lineno-$LINENO}: We will use a staged boost library from $BOOST_ROOT" >&5 +$as_echo "$as_me: We will use a staged boost library from $BOOST_ROOT" >&6;} + BOOST_CPPFLAGS="-I$BOOST_ROOT" + BOOST_LDFLAGS="-L$BOOST_ROOT/stage/$libsubdir" + fi + fi + fi + fi + + CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS" + export CPPFLAGS + LDFLAGS="$LDFLAGS $BOOST_LDFLAGS" + export LDFLAGS + + ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + + #include + +int +main () +{ + + #if BOOST_VERSION >= $WANT_BOOST_VERSION + // Everything is okay + #else + # error Boost version is too old + #endif + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + succeeded=yes + found_system=yes + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + fi + + if test "$succeeded" != "yes" ; then + if test "$_version" = "0" ; then + { $as_echo "$as_me:${as_lineno-$LINENO}: We could not detect the boost libraries (version $boost_lib_version_req_shorten or higher). If you have a staged boost library (still not installed) please specify \$BOOST_ROOT in your environment and do not give a PATH to --with-boost option. If you are sure you have boost installed, then check your version number looking in . See http://randspringer.de/boost for more documentation." >&5 +$as_echo "$as_me: We could not detect the boost libraries (version $boost_lib_version_req_shorten or higher). If you have a staged boost library (still not installed) please specify \$BOOST_ROOT in your environment and do not give a PATH to --with-boost option. If you are sure you have boost installed, then check your version number looking in . See http://randspringer.de/boost for more documentation." >&6;} + else + { $as_echo "$as_me:${as_lineno-$LINENO}: Your boost libraries seems to old (version $_version)." >&5 +$as_echo "$as_me: Your boost libraries seems to old (version $_version)." 
>&6;} + fi + # execute ACTION-IF-NOT-FOUND (if present): + if false; then + BUILD_HAVE_BOOST_TRUE= + BUILD_HAVE_BOOST_FALSE='#' +else + BUILD_HAVE_BOOST_TRUE='#' + BUILD_HAVE_BOOST_FALSE= +fi + + + else + + + +$as_echo "#define HAVE_BOOST /**/" >>confdefs.h + + # execute ACTION-IF-FOUND (if present): + if true; then + BUILD_HAVE_BOOST_TRUE= + BUILD_HAVE_BOOST_FALSE='#' +else + BUILD_HAVE_BOOST_TRUE='#' + BUILD_HAVE_BOOST_FALSE= +fi + + fi + + CPPFLAGS="$CPPFLAGS_SAVED" + LDFLAGS="$LDFLAGS_SAVED" +fi + + + +# +# Check for SSL presence and usability + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing crypt" >&5 +$as_echo_n "checking for library containing crypt... " >&6; } +if ${ac_cv_search_crypt+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_func_search_save_LIBS=$LIBS +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char crypt (); +int +main () +{ +return crypt (); + ; + return 0; +} +_ACEOF +for ac_lib in '' crypt; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + if ac_fn_c_try_link "$LINENO"; then : + ac_cv_search_crypt=$ac_res +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext + if ${ac_cv_search_crypt+:} false; then : + break +fi +done +if ${ac_cv_search_crypt+:} false; then : + +else + ac_cv_search_crypt=no +fi +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_crypt" >&5 +$as_echo "$ac_cv_search_crypt" >&6; } +ac_res=$ac_cv_search_crypt +if test "$ac_res" != no; then : + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" + LIBCRYPT="-lcrypt" + +fi + + + + found=false + overriden_with_ssl=false + +# Check whether --with-openssl was given. +if test "${with_openssl+set}" = set; then : + withval=$with_openssl; + case "$withval" in + "" | y | ye | yes | n | no) + as_fn_error $? "Invalid --with-openssl value" "$LINENO" 5 + ;; + *) ssldirs="$withval" + overriden_with_ssl=true + ;; + esac + +else + + # if pkg-config is installed and openssl has installed a .pc file, + # then use that information and don't search ssldirs + # Extract the first word of "pkg-config", so it can be a program name with args. +set dummy pkg-config; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_path_PKG_CONFIG+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $PKG_CONFIG in + [\\/]* | ?:[\\/]*) + ac_cv_path_PKG_CONFIG="$PKG_CONFIG" # Let the user override the test with a path. + ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_path_PKG_CONFIG="$as_dir/$ac_word$ac_exec_ext" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + ;; +esac +fi +PKG_CONFIG=$ac_cv_path_PKG_CONFIG +if test -n "$PKG_CONFIG"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $PKG_CONFIG" >&5 +$as_echo "$PKG_CONFIG" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + if test x"$PKG_CONFIG" != x""; then + OPENSSL_LDFLAGS=`$PKG_CONFIG openssl --libs-only-L 2>/dev/null` + if test $? = 0; then + OPENSSL_LIBS=`$PKG_CONFIG openssl --libs-only-l 2>/dev/null` + OPENSSL_INCLUDES=`$PKG_CONFIG openssl --cflags-only-I 2>/dev/null` + found=true + fi + fi + + # no such luck; use some default ssldirs + if ! $found; then + ssldirs="/usr/local/ssl /usr/lib/ssl /usr/ssl /usr/pkg /usr/sfw /usr/local /opt/local /usr" + fi + + +fi + + + + # note that we #include , so the OpenSSL headers have to be in + # an 'openssl' subdirectory + + if ! $found; then + OPENSSL_INCLUDES= + for ssldir in $ssldirs; do + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for openssl/ssl.h in $ssldir" >&5 +$as_echo_n "checking for openssl/ssl.h in $ssldir... 
" >&6; } + if test -f "$ssldir/include/openssl/ssl.h"; then + OPENSSL_INCLUDES="-I$ssldir/include" + if test -d "$ssldir/lib64"; then + OPENSSL_LDFLAGS="-L$ssldir/lib64" + if test "$overriden_with_ssl"; then + + if test "x$LIBTOOL_LINK_FLAGS" = "x"; then + test "x$verbose" = "xyes" && echo " setting LIBTOOL_LINK_FLAGS to \"-rpath ${ssldir}/lib64\"" + LIBTOOL_LINK_FLAGS="-rpath ${ssldir}/lib64" + else + ats_addto_bugger="-rpath ${ssldir}/lib64" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in $LIBTOOL_LINK_FLAGS; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to LIBTOOL_LINK_FLAGS" + LIBTOOL_LINK_FLAGS="$LIBTOOL_LINK_FLAGS $i" + fi + done + fi + + fi + else + OPENSSL_LDFLAGS="-L$ssldir/lib" + if test "$overriden_with_ssl"; then + + if test "x$LIBTOOL_LINK_FLAGS" = "x"; then + test "x$verbose" = "xyes" && echo " setting LIBTOOL_LINK_FLAGS to \"-rpath ${ssldir}/lib\"" + LIBTOOL_LINK_FLAGS="-rpath ${ssldir}/lib" + else + ats_addto_bugger="-rpath ${ssldir}/lib" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in $LIBTOOL_LINK_FLAGS; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to LIBTOOL_LINK_FLAGS" + LIBTOOL_LINK_FLAGS="$LIBTOOL_LINK_FLAGS $i" + fi + done + fi + + fi + fi + OPENSSL_LIBS="-lssl -lcrypto" + found=true + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + break + else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + fi + done + + # if the file wasn't found, well, go ahead and try the link anyway -- maybe + # it will just work! 
+ fi + + # try the preprocessor and linker with our new flags, + # being careful not to pollute the global LIBS, LDFLAGS, and CPPFLAGS + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiling and linking against OpenSSL works" >&5 +$as_echo_n "checking whether compiling and linking against OpenSSL works... " >&6; } + echo "Trying link with OPENSSL_LDFLAGS=$OPENSSL_LDFLAGS;" \ + "OPENSSL_LIBS=$OPENSSL_LIBS; OPENSSL_INCLUDES=$OPENSSL_INCLUDES" >&5 + + save_LIBS="$LIBS" + save_LDFLAGS="$LDFLAGS" + save_CPPFLAGS="$CPPFLAGS" + LDFLAGS="$LDFLAGS $OPENSSL_LDFLAGS" + LIBS="$OPENSSL_LIBS $LIBS" + CPPFLAGS="$OPENSSL_INCLUDES $CPPFLAGS" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +int +main () +{ +SSL_new(NULL) + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + + enable_crypto=yes + + +else + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + + as_fn_error $? 
"failed to find OpenSSL" "$LINENO" 5 + enable_crypto=no + + +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + CPPFLAGS="$save_CPPFLAGS" + LDFLAGS="$save_LDFLAGS" + LIBS="$save_LIBS" + + + + + + + if test "x${enable_crypto}" = "xyes"; then + + if test "x$LDFLAGS" = "x"; then + test "x$verbose" = "xyes" && echo " setting LDFLAGS to \"$OPENSSL_LDFLAGS\"" + LDFLAGS="$OPENSSL_LDFLAGS" + else + ats_addto_bugger="$OPENSSL_LDFLAGS" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in $LDFLAGS; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to LDFLAGS" + LDFLAGS="$LDFLAGS $i" + fi + done + fi + + + if test "x$CPPFLAGS" = "x"; then + test "x$verbose" = "xyes" && echo " setting CPPFLAGS to \"$OPENSSL_INCLUDES\"" + CPPFLAGS="$OPENSSL_INCLUDES" + else + ats_addto_bugger="$OPENSSL_INCLUDES" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in $CPPFLAGS; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to CPPFLAGS" + CPPFLAGS="$CPPFLAGS $i" + fi + done + fi + + fi + + + +# +# Check for NextProtocolNegotiation TLS extension support. 
+ + enable_tls_npn=yes + _npn_saved_LIBS=$LIBS + + if test "x$LIBS" = "x"; then + test "x$verbose" = "xyes" && echo " setting LIBS to \"$OPENSSL_LIBS\"" + LIBS="$OPENSSL_LIBS" + else + ats_addto_bugger="$OPENSSL_LIBS" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in $LIBS; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to LIBS" + LIBS="$LIBS $i" + fi + done + fi + + for ac_func in SSL_CTX_set_next_protos_advertised_cb SSL_CTX_set_next_proto_select_cb SSL_select_next_proto SSL_get0_next_proto_negotiated +do : + as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` +ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" +if eval test \"x\$"$as_ac_var"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 +_ACEOF + +else + enable_tls_npn=no + +fi +done + + LIBS=$_npn_saved_LIBS + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to enable Next Protocol Negotiation TLS extension support" >&5 +$as_echo_n "checking whether to enable Next Protocol Negotiation TLS extension support... " >&6; } + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_tls_npn" >&5 +$as_echo "$enable_tls_npn" >&6; } + + tsl_prefix="use" + tsl_stem="tls_npn" + eval "tsl_enable=\$enable_${tsl_stem}" + if test "x$tsl_enable" = "xyes"; then : + eval "${tsl_prefix}_${tsl_stem}=1" +else + eval "${tsl_prefix}_${tsl_stem}=0" + +fi + + + + +# +# Check for ALPN TLS extension support. 
+ + enable_tls_alpn=yes + _alpn_saved_LIBS=$LIBS + + if test "x$LIBS" = "x"; then + test "x$verbose" = "xyes" && echo " setting LIBS to \"$OPENSSL_LIBS\"" + LIBS="$OPENSSL_LIBS" + else + ats_addto_bugger="$OPENSSL_LIBS" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in $LIBS; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to LIBS" + LIBS="$LIBS $i" + fi + done + fi + + for ac_func in SSL_CTX_set_alpn_protos SSL_CTX_set_alpn_select_cb SSL_get0_alpn_selected SSL_select_next_proto +do : + as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` +ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" +if eval test \"x\$"$as_ac_var"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 +_ACEOF + +else + enable_tls_alpn=no + +fi +done + + LIBS=$_alpn_saved_LIBS + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to enable Application Layer Protocol Negotiation TLS extension support" >&5 +$as_echo_n "checking whether to enable Application Layer Protocol Negotiation TLS extension support... " >&6; } + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_tls_alpn" >&5 +$as_echo "$enable_tls_alpn" >&6; } + + tsl_prefix="use" + tsl_stem="tls_alpn" + eval "tsl_enable=\$enable_${tsl_stem}" + if test "x$tsl_enable" = "xyes"; then : + eval "${tsl_prefix}_${tsl_stem}=1" +else + eval "${tsl_prefix}_${tsl_stem}=0" + +fi + + + +# +# Check for EC key support. 
+ + + _eckeys_saved_LIBS=$LIBS + + if test "x$LIBS" = "x"; then + test "x$verbose" = "xyes" && echo " setting LIBS to \"$OPENSSL_LIBS\"" + LIBS="$OPENSSL_LIBS" + else + ats_addto_bugger="$OPENSSL_LIBS" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in $LIBS; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to LIBS" + LIBS="$LIBS $i" + fi + done + fi + + for ac_header in openssl/ec.h +do : + ac_fn_c_check_header_mongrel "$LINENO" "openssl/ec.h" "ac_cv_header_openssl_ec_h" "$ac_includes_default" +if test "x$ac_cv_header_openssl_ec_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_OPENSSL_EC_H 1 +_ACEOF + +fi + +done + + for ac_func in EC_KEY_new_by_curve_name +do : + ac_fn_c_check_func "$LINENO" "EC_KEY_new_by_curve_name" "ac_cv_func_EC_KEY_new_by_curve_name" +if test "x$ac_cv_func_EC_KEY_new_by_curve_name" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_EC_KEY_NEW_BY_CURVE_NAME 1 +_ACEOF + enable_tls_eckey=yes +else + enable_tls_eckey=no +fi +done + + LIBS=$_eckeys_saved_LIBS + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether EC keys are supported" >&5 +$as_echo_n "checking whether EC keys are supported... " >&6; } + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_tls_eckey" >&5 +$as_echo "$enable_tls_eckey" >&6; } + + tsl_prefix="use" + tsl_stem="tls_eckey" + eval "tsl_enable=\$enable_${tsl_stem}" + if test "x$tsl_enable" = "xyes"; then : + eval "${tsl_prefix}_${tsl_stem}=1" +else + eval "${tsl_prefix}_${tsl_stem}=0" + +fi + + + + +# +# Check for ServerNameIndication TLS extension support. 
+ + _sni_saved_LIBS=$LIBS + enable_tls_sni=yes + + + if test "x$LIBS" = "x"; then + test "x$verbose" = "xyes" && echo " setting LIBS to \"$OPENSSL_LIBS\"" + LIBS="$OPENSSL_LIBS" + else + ats_addto_bugger="$OPENSSL_LIBS" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in $LIBS; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to LIBS" + LIBS="$LIBS $i" + fi + done + fi + + for ac_header in openssl/tls1.h openssl/ssl.h openssl/ts.h +do : + as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` +ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default" +if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 +_ACEOF + +fi + +done + + # We are looking for SSL_CTX_set_tlsext_servername_callback, but it's a + # macro, so AC_CHECK_FUNCS is not going to do the business. + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for SSL_CTX_set_tlsext_servername_callback" >&5 +$as_echo_n "checking for SSL_CTX_set_tlsext_servername_callback... " >&6; } + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + + +#if HAVE_OPENSSL_SSL_H +#include +#endif +#if HAVE_OPENSSL_TLS1_H +#include +#endif + +int +main () +{ +SSL_CTX_set_tlsext_servername_callback(NULL, NULL); + ; + return 0; +} + +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + +else + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + enable_tls_sni=no + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + + for ac_func in SSL_get_servername +do : + ac_fn_c_check_func "$LINENO" "SSL_get_servername" "ac_cv_func_SSL_get_servername" +if test "x$ac_cv_func_SSL_get_servername" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_SSL_GET_SERVERNAME 1 +_ACEOF + +else + enable_tls_sni=no +fi +done + + + LIBS=$_sni_saved_LIBS + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to enable ServerNameIndication TLS extension support" >&5 +$as_echo_n "checking whether to enable ServerNameIndication TLS extension support... " >&6; } + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_tls_sni" >&5 +$as_echo "$enable_tls_sni" >&6; } + + tsl_prefix="use" + tsl_stem="tls_sni" + eval "tsl_enable=\$enable_${tsl_stem}" + if test "x$tsl_enable" = "xyes"; then : + eval "${tsl_prefix}_${tsl_stem}=1" +else + eval "${tsl_prefix}_${tsl_stem}=0" + +fi + + + + +# +# Check for zlib presence and usability + +enable_zlib=no + +# Check whether --with-zlib was given. 
+if test "${with_zlib+set}" = set; then : + withval=$with_zlib; + if test "x$withval" != "xyes" && test "x$withval" != "x"; then + zlib_base_dir="$withval" + if test "$withval" != "no"; then + enable_zlib=yes + case "$withval" in + *":"*) + zlib_include="`echo $withval |sed -e 's/:.*$//'`" + zlib_ldflags="`echo $withval |sed -e 's/^.*://'`" + { $as_echo "$as_me:${as_lineno-$LINENO}: checking checking for zlib includes in $zlib_include libs in $zlib_ldflags " >&5 +$as_echo_n "checking checking for zlib includes in $zlib_include libs in $zlib_ldflags ... " >&6; } + ;; + *) + zlib_include="$withval/include" + zlib_ldflags="$withval/lib" + { $as_echo "$as_me:${as_lineno-$LINENO}: checking checking for zlib includes in $withval" >&5 +$as_echo_n "checking checking for zlib includes in $withval... " >&6; } + ;; + esac + fi + fi + +fi + + +if test "x$zlib_base_dir" = "x"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for zlib location" >&5 +$as_echo_n "checking for zlib location... " >&6; } + if ${ats_cv_zlib_dir+:} false; then : + $as_echo_n "(cached) " >&6 +else + + for dir in /usr/local /usr ; do + if test -d $dir && test -f $dir/include/zlib.h; then + ats_cv_zlib_dir=$dir + break + fi + done + +fi + + zlib_base_dir=$ats_cv_zlib_dir + if test "x$zlib_base_dir" = "x"; then + enable_zlib=no + { $as_echo "$as_me:${as_lineno-$LINENO}: result: not found" >&5 +$as_echo "not found" >&6; } + else + enable_zlib=yes + zlib_include="$zlib_base_dir/include" + zlib_ldflags="$zlib_base_dir/lib" + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $zlib_base_dir" >&5 +$as_echo "$zlib_base_dir" >&6; } + fi +else + if test -d $zlib_include && test -d $zlib_ldflags && test -f $zlib_include/zlib.h; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: ok" >&5 +$as_echo "ok" >&6; } + else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: not found" >&5 +$as_echo "not found" >&6; } + fi +fi + +zlibh=0 +if test "$enable_zlib" != "no"; then + saved_ldflags=$LDFLAGS + 
saved_cppflags=$CPPFLAGS + zlib_have_headers=0 + zlib_have_libs=0 + if test "$zlib_base_dir" != "/usr"; then + + if test "x$CPPFLAGS" = "x"; then + test "x$verbose" = "xyes" && echo " setting CPPFLAGS to \"-I${zlib_include}\"" + CPPFLAGS="-I${zlib_include}" + else + ats_addto_bugger="-I${zlib_include}" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in $CPPFLAGS; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to CPPFLAGS" + CPPFLAGS="$CPPFLAGS $i" + fi + done + fi + + + if test "x$LDFLAGS" = "x"; then + test "x$verbose" = "xyes" && echo " setting LDFLAGS to \"-L${zlib_ldflags}\"" + LDFLAGS="-L${zlib_ldflags}" + else + ats_addto_bugger="-L${zlib_ldflags}" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in $LDFLAGS; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to LDFLAGS" + LDFLAGS="$LDFLAGS $i" + fi + done + fi + + + if test "x$LIBTOOL_LINK_FLAGS" = "x"; then + test "x$verbose" = "xyes" && echo " setting LIBTOOL_LINK_FLAGS to \"-rpath ${zlib_ldflags}\"" + LIBTOOL_LINK_FLAGS="-rpath ${zlib_ldflags}" + else + ats_addto_bugger="-rpath ${zlib_ldflags}" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in $LIBTOOL_LINK_FLAGS; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to LIBTOOL_LINK_FLAGS" + LIBTOOL_LINK_FLAGS="$LIBTOOL_LINK_FLAGS $i" + fi + done + fi + + fi + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing compressBound" >&5 +$as_echo_n "checking for library containing compressBound... 
" >&6; } +if ${ac_cv_search_compressBound+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_func_search_save_LIBS=$LIBS +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char compressBound (); +int +main () +{ +return compressBound (); + ; + return 0; +} +_ACEOF +for ac_lib in '' z; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + if ac_fn_c_try_link "$LINENO"; then : + ac_cv_search_compressBound=$ac_res +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext + if ${ac_cv_search_compressBound+:} false; then : + break +fi +done +if ${ac_cv_search_compressBound+:} false; then : + +else + ac_cv_search_compressBound=no +fi +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_compressBound" >&5 +$as_echo "$ac_cv_search_compressBound" >&6; } +ac_res=$ac_cv_search_compressBound +if test "$ac_res" != no; then : + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" + zlib_have_libs=1 +fi + + if test "$zlib_have_libs" != "0"; then + +for ac_header in zlib.h +do : + ac_fn_c_check_header_mongrel "$LINENO" "zlib.h" "ac_cv_header_zlib_h" "$ac_includes_default" +if test "x$ac_cv_header_zlib_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_ZLIB_H 1 +_ACEOF + zlib_have_headers=1 +fi + +done + +for tsc_i in zlib.h +do + ac_safe=`echo "$tsc_i" | sed 'y%./+-%__p_%'` + tsc_2=`echo "$tsc_i" | sed -e 's%/%_%g' -e 's/\.//g' -e 's/-//g'` + if eval "test \"`echo '$ac_cv_header_'$ac_safe`\" = yes"; then + eval "$tsc_2=1" + else + eval "$tsc_2=0" + fi +done + + fi + if test "$zlib_have_headers" != "0"; then + LIBZ=-lz + + else + enable_zlib=no + 
CPPFLAGS=$saved_cppflags + LDFLAGS=$saved_ldflags + fi +fi + + + +# +# Check for lzma presence and usability + +enable_lzma=no + +# Check whether --with-lzma was given. +if test "${with_lzma+set}" = set; then : + withval=$with_lzma; + if test "x$withval" != "xyes" && test "x$withval" != "x"; then + lzma_base_dir="$withval" + if test "$withval" != "no"; then + enable_lzma=yes + case "$withval" in + *":"*) + lzma_include="`echo $withval |sed -e 's/:.*$//'`" + lzma_ldflags="`echo $withval |sed -e 's/^.*://'`" + { $as_echo "$as_me:${as_lineno-$LINENO}: checking checking for lzma includes in $lzma_include libs in $lzma_ldflags " >&5 +$as_echo_n "checking checking for lzma includes in $lzma_include libs in $lzma_ldflags ... " >&6; } + ;; + *) + lzma_include="$withval/include" + lzma_ldflags="$withval/lib" + { $as_echo "$as_me:${as_lineno-$LINENO}: checking checking for lzma includes in $withval" >&5 +$as_echo_n "checking checking for lzma includes in $withval... " >&6; } + ;; + esac + fi + fi + +fi + + +if test "x$lzma_base_dir" = "x"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for lzma location" >&5 +$as_echo_n "checking for lzma location... 
" >&6; } + if ${ats_cv_lzma_dir+:} false; then : + $as_echo_n "(cached) " >&6 +else + + for dir in /usr/local /usr ; do + if test -d $dir && test -f $dir/include/lzma.h; then + ats_cv_lzma_dir=$dir + break + fi + done + +fi + + lzma_base_dir=$ats_cv_lzma_dir + if test "x$lzma_base_dir" = "x"; then + enable_lzma=no + { $as_echo "$as_me:${as_lineno-$LINENO}: result: not found" >&5 +$as_echo "not found" >&6; } + else + enable_lzma=yes + lzma_include="$lzma_base_dir/include" + lzma_ldflags="$lzma_base_dir/lib" + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lzma_base_dir" >&5 +$as_echo "$lzma_base_dir" >&6; } + fi +else + if test -d $lzma_include && test -d $lzma_ldflags && test -f $lzma_include/lzma.h; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: ok" >&5 +$as_echo "ok" >&6; } + else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: not found" >&5 +$as_echo "not found" >&6; } + fi +fi + +lzmah=0 +if test "$enable_lzma" != "no"; then + saved_ldflags=$LDFLAGS + saved_cppflags=$CPPFLAGS + lzma_have_headers=0 + lzma_have_libs=0 + if test "$lzma_base_dir" != "/usr"; then + + if test "x$CPPFLAGS" = "x"; then + test "x$verbose" = "xyes" && echo " setting CPPFLAGS to \"-I${lzma_include}\"" + CPPFLAGS="-I${lzma_include}" + else + ats_addto_bugger="-I${lzma_include}" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in $CPPFLAGS; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to CPPFLAGS" + CPPFLAGS="$CPPFLAGS $i" + fi + done + fi + + + if test "x$LDFLAGS" = "x"; then + test "x$verbose" = "xyes" && echo " setting LDFLAGS to \"-L${lzma_ldflags}\"" + LDFLAGS="-L${lzma_ldflags}" + else + ats_addto_bugger="-L${lzma_ldflags}" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in $LDFLAGS; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + 
test "x$verbose" = "xyes" && echo " adding \"$i\" to LDFLAGS" + LDFLAGS="$LDFLAGS $i" + fi + done + fi + + + if test "x$LIBTOOL_LINK_FLAGS" = "x"; then + test "x$verbose" = "xyes" && echo " setting LIBTOOL_LINK_FLAGS to \"-R${lzma_ldflags}\"" + LIBTOOL_LINK_FLAGS="-R${lzma_ldflags}" + else + ats_addto_bugger="-R${lzma_ldflags}" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in $LIBTOOL_LINK_FLAGS; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to LIBTOOL_LINK_FLAGS" + LIBTOOL_LINK_FLAGS="$LIBTOOL_LINK_FLAGS $i" + fi + done + fi + + fi + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing lzma_code" >&5 +$as_echo_n "checking for library containing lzma_code... " >&6; } +if ${ac_cv_search_lzma_code+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_func_search_save_LIBS=$LIBS +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char lzma_code (); +int +main () +{ +return lzma_code (); + ; + return 0; +} +_ACEOF +for ac_lib in '' lzma; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + if ac_fn_c_try_link "$LINENO"; then : + ac_cv_search_lzma_code=$ac_res +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext + if ${ac_cv_search_lzma_code+:} false; then : + break +fi +done +if ${ac_cv_search_lzma_code+:} false; then : + +else + ac_cv_search_lzma_code=no +fi +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_lzma_code" >&5 +$as_echo "$ac_cv_search_lzma_code" >&6; } +ac_res=$ac_cv_search_lzma_code +if test "$ac_res" != no; then : + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" + lzma_have_libs=1 +fi + + if test "$lzma_have_libs" != "0"; then + +for ac_header in lzma.h +do : + ac_fn_c_check_header_mongrel "$LINENO" "lzma.h" "ac_cv_header_lzma_h" "$ac_includes_default" +if test "x$ac_cv_header_lzma_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_LZMA_H 1 +_ACEOF + lzma_have_headers=1 +fi + +done + +for tsc_i in lzma.h +do + ac_safe=`echo "$tsc_i" | sed 'y%./+-%__p_%'` + tsc_2=`echo "$tsc_i" | sed -e 's%/%_%g' -e 's/\.//g' -e 's/-//g'` + if eval "test \"`echo '$ac_cv_header_'$ac_safe`\" = yes"; then + eval "$tsc_2=1" + else + eval "$tsc_2=0" + fi +done + + fi + if test "$lzma_have_headers" != "0"; then + LIBLZMA=-llzma + + else + enable_lzma=no + CPPFLAGS=$saved_cppflags + LDFLAGS=$saved_ldflags + fi +fi + + + +# +# Tcl macros provided by build/tcl.m4 +# +# this will error out if tclConfig.sh is not found + + # + # Ok, lets find the tcl configuration + # First, look for one uninstalled. 
+ # the alternative search directory is invoked by --with-tcl + # + + if test x"${no_tcl}" = x ; then + # we reset no_tcl in case something fails here + no_tcl=true + +# Check whether --with-tcl was given. +if test "${with_tcl+set}" = set; then : + withval=$with_tcl; with_tclconfig="${withval}" +fi + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Tcl configuration" >&5 +$as_echo_n "checking for Tcl configuration... " >&6; } + if ${ac_cv_c_tclconfig+:} false; then : + $as_echo_n "(cached) " >&6 +else + + + # First check to see if --with-tcl was specified. + if test x"${with_tclconfig}" != x ; then + case "${with_tclconfig}" in + */tclConfig.sh ) + if test -f "${with_tclconfig}"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: --with-tcl argument should refer to directory containing tclConfig.sh, not to tclConfig.sh itself" >&5 +$as_echo "$as_me: WARNING: --with-tcl argument should refer to directory containing tclConfig.sh, not to tclConfig.sh itself" >&2;} + with_tclconfig="`echo "${with_tclconfig}" | sed 's!/tclConfig\.sh$!!'`" + fi ;; + esac + if test -f "${with_tclconfig}/tclConfig.sh" ; then + ac_cv_c_tclconfig="`(cd "${with_tclconfig}"; pwd)`" + else + as_fn_error $? 
"${with_tclconfig} directory doesn't contain tclConfig.sh" "$LINENO" 5 + fi + fi + + # then check for a private Tcl installation + if test x"${ac_cv_c_tclconfig}" = x ; then + for i in \ + ../tcl \ + `ls -dr ../tcl[8-9].[0-9].[0-9]* 2>/dev/null` \ + `ls -dr ../tcl[8-9].[0-9] 2>/dev/null` \ + `ls -dr ../tcl[8-9].[0-9]* 2>/dev/null` \ + ../../tcl \ + `ls -dr ../../tcl[8-9].[0-9].[0-9]* 2>/dev/null` \ + `ls -dr ../../tcl[8-9].[0-9] 2>/dev/null` \ + `ls -dr ../../tcl[8-9].[0-9]* 2>/dev/null` \ + ../../../tcl \ + `ls -dr ../../../tcl[8-9].[0-9].[0-9]* 2>/dev/null` \ + `ls -dr ../../../tcl[8-9].[0-9] 2>/dev/null` \ + `ls -dr ../../../tcl[8-9].[0-9]* 2>/dev/null` ; do + if test -f "$i/unix/tclConfig.sh" ; then + ac_cv_c_tclconfig="`(cd $i/unix; pwd)`" + break + fi + done + fi + + # on Darwin, check in Framework installation locations + if test "`uname -s`" = "Darwin" -a x"${ac_cv_c_tclconfig}" = x ; then + for i in "`xcrun --show-sdk-path 2>/dev/null`/usr/lib" \ + `ls -d ~/Library/Frameworks 2>/dev/null` \ + `ls -d /Library/Frameworks 2>/dev/null` \ + `ls -d /Network/Library/Frameworks 2>/dev/null` \ + `ls -d /System/Library/Frameworks 2>/dev/null` \ + ; do + if test -f "$i/tclConfig.sh" ; then + ac_cv_c_tclconfig="`(cd $i; pwd)`" + break + elif test -f "$i/Tcl.framework/tclConfig.sh" ; then + ac_cv_c_tclconfig="`(cd $i/Tcl.framework; pwd)`" + break + fi + done + fi + + # check in a few common install locations + if test x"${ac_cv_c_tclconfig}" = x ; then + for i in `ls -d ${libdir} 2>/dev/null` \ + `ls -d ${exec_prefix}/lib 2>/dev/null` \ + `ls -dr ${exec_prefix}/lib/tcl[8-9].[0-9].[0-9]* 2>/dev/null` \ + `ls -dr ${exec_prefix}/lib/tcl[8-9].[0-9] 2>/dev/null` \ + `ls -dr ${exec_prefix}/lib/tcl[8-9].[0-9]* 2>/dev/null` \ + `ls -d ${prefix}/lib 2>/dev/null` \ + `ls -dr ${prefix}/lib/tcl[8-9].[0-9].[0-9]* 2>/dev/null` \ + `ls -dr ${prefix}/lib/tcl[8-9].[0-9] 2>/dev/null` \ + `ls -dr ${prefix}/lib/tcl[8-9].[0-9]* 2>/dev/null` \ + `ls -d /usr/local/lib 2>/dev/null` \ + `ls 
-dr /usr/local/lib/tcl[8-9].[0-9].[0-9]* 2>/dev/null` \ + `ls -dr /usr/local/lib/tcl[8-9].[0-9] 2>/dev/null` \ + `ls -dr /usr/local/lib/tcl[8-9].[0-9]* 2>/dev/null` \ + `ls -d /usr/lib64 2>/dev/null` \ + `ls -dr /usr/lib64/tcl[8-9].[0-9].[0-9]* 2>/dev/null` \ + `ls -dr /usr/lib64/tcl[8-9].[0-9] 2>/dev/null` \ + `ls -dr /usr/lib64/tcl[8-9].[0-9]* 2>/dev/null` \ + `ls -d /usr/contrib/lib 2>/dev/null` \ + `ls -dr /usr/contrib/lib/tcl[8-9].[0-9].[0-9]* 2>/dev/null` \ + `ls -dr /usr/contrib/lib/tcl[8-9].[0-9] 2>/dev/null` \ + `ls -dr /usr/contrib/lib/tcl[8-9].[0-9]* 2>/dev/null` \ + `ls -d /usr/lib 2>/dev/null` \ + `ls -dr /usr/lib/tcl[8-9].[0-9].[0-9]* 2>/dev/null` \ + `ls -dr /usr/lib/tcl[8-9].[0-9] 2>/dev/null` \ + `ls -dr /usr/lib/tcl[8-9].[0-9]* 2>/dev/null` \ + ; do + if test -f "$i/tclConfig.sh" ; then + ac_cv_c_tclconfig="`(cd $i; pwd)`" + break + fi + done + fi + + # check in a few other private locations + if test x"${ac_cv_c_tclconfig}" = x ; then + for i in \ + ${srcdir}/../tcl \ + `ls -dr ${srcdir}/../tcl[8-9].[0-9].[0-9]* 2>/dev/null` \ + `ls -dr ${srcdir}/../tcl[8-9].[0-9] 2>/dev/null` \ + `ls -dr ${srcdir}/../tcl[8-9].[0-9]* 2>/dev/null` ; do + if test -f "$i/unix/tclConfig.sh" ; then + ac_cv_c_tclconfig="`(cd $i/unix; pwd)`" + break + fi + done + fi + +fi + + + if test x"${ac_cv_c_tclconfig}" = x ; then + TCL_BIN_DIR="# no Tcl configs found" + as_fn_error $? "Can't find Tcl configuration, install the TCL dev package" "$LINENO" 5 + else + no_tcl= + TCL_BIN_DIR="${ac_cv_c_tclconfig}" + { $as_echo "$as_me:${as_lineno-$LINENO}: result: found ${TCL_BIN_DIR}/tclConfig.sh" >&5 +$as_echo "found ${TCL_BIN_DIR}/tclConfig.sh" >&6; } + fi + fi + + +# if tclConfig.sh loads properly, assume libraries are there and working + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for existence of ${TCL_BIN_DIR}/tclConfig.sh" >&5 +$as_echo_n "checking for existence of ${TCL_BIN_DIR}/tclConfig.sh... 
" >&6; } + + if test -f "${TCL_BIN_DIR}/tclConfig.sh" ; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: loading" >&5 +$as_echo "loading" >&6; } + . "${TCL_BIN_DIR}/tclConfig.sh" + else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: could not find ${TCL_BIN_DIR}/tclConfig.sh" >&5 +$as_echo "could not find ${TCL_BIN_DIR}/tclConfig.sh" >&6; } + fi + + # eval is required to do the TCL_DBGX substitution + eval "TCL_LIB_FILE=\"${TCL_LIB_FILE}\"" + eval "TCL_STUB_LIB_FILE=\"${TCL_STUB_LIB_FILE}\"" + + # If the TCL_BIN_DIR is the build directory (not the install directory), + # then set the common variable name to the value of the build variables. + # For example, the variable TCL_LIB_SPEC will be set to the value + # of TCL_BUILD_LIB_SPEC. An extension should make use of TCL_LIB_SPEC + # instead of TCL_BUILD_LIB_SPEC since it will work with both an + # installed and uninstalled version of Tcl. + if test -f "${TCL_BIN_DIR}/Makefile" ; then + TCL_LIB_SPEC="${TCL_BUILD_LIB_SPEC}" + TCL_STUB_LIB_SPEC="${TCL_BUILD_STUB_LIB_SPEC}" + TCL_STUB_LIB_PATH="${TCL_BUILD_STUB_LIB_PATH}" + elif test "`uname -s`" = "Darwin"; then + # If Tcl was built as a framework, attempt to use the libraries + # from the framework at the given location so that linking works + # against Tcl.framework installed in an arbitary location. 
+ case ${TCL_DEFS} in + *TCL_FRAMEWORK*) + if test -f "${TCL_BIN_DIR}/${TCL_LIB_FILE}"; then + for i in "`cd "${TCL_BIN_DIR}"; pwd`" \ + "`cd "${TCL_BIN_DIR}"/../..; pwd`"; do + if test "`basename "$i"`" = "${TCL_LIB_FILE}.framework"; then + TCL_LIB_SPEC="-F`dirname "$i" | sed -e 's/ /\\\\ /g'` -framework ${TCL_LIB_FILE}" + break + fi + done + fi + if test -f "${TCL_BIN_DIR}/${TCL_STUB_LIB_FILE}"; then + TCL_STUB_LIB_SPEC="-L`echo "${TCL_BIN_DIR}" | sed -e 's/ /\\\\ /g'` ${TCL_STUB_LIB_FLAG}" + TCL_STUB_LIB_PATH="${TCL_BIN_DIR}/${TCL_STUB_LIB_FILE}" + fi + ;; + esac + fi + + # eval is required to do the TCL_DBGX substitution + eval "TCL_LIB_FLAG=\"${TCL_LIB_FLAG}\"" + eval "TCL_LIB_SPEC=\"${TCL_LIB_SPEC}\"" + eval "TCL_STUB_LIB_FLAG=\"${TCL_STUB_LIB_FLAG}\"" + eval "TCL_STUB_LIB_SPEC=\"${TCL_STUB_LIB_SPEC}\"" + + + + + + + + + + + + + + + +# expect tclConfig.sh to populate TCL_LIB_FLAG and TCL_INCLUDE_SPEC +if test "$host_os_def" == "darwin"; then + TCL_LIB_SPEC="-ltcl" # OSX fails to populate this variable +fi +LIBTCL=$TCL_LIB_SPEC + + + +if test "x${TCL_INCLUDE_SPEC}" != "x-I/usr/include"; then + + if test "x$CPPFLAGS" = "x"; then + test "x$verbose" = "xyes" && echo " setting CPPFLAGS to \"$TCL_INCLUDE_SPEC\"" + CPPFLAGS="$TCL_INCLUDE_SPEC" + else + ats_addto_bugger="$TCL_INCLUDE_SPEC" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in $CPPFLAGS; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to CPPFLAGS" + CPPFLAGS="$CPPFLAGS $i" + fi + done + fi + +fi + +# +# Check for XML parser +# + + enable_xml=no + { $as_echo "$as_me:${as_lineno-$LINENO}: checking \"For XML parser\"" >&5 +$as_echo_n "checking \"For XML parser\"... " >&6; } + +# Check whether --with-xml was given. 
+if test "${with_xml+set}" = set; then : + withval=$with_xml; + if test "$withval" = "expat" ; then + +enable_expat=no + +# Check whether --with-expat was given. +if test "${with_expat+set}" = set; then : + withval=$with_expat; + if test "x$withval" != "xyes" && test "x$withval" != "x"; then + expat_base_dir="$withval" + if test "$withval" != "no"; then + enable_expat=yes + case "$withval" in + *":"*) + expat_include="`echo $withval |sed -e 's/:.*$//'`" + expat_ldflags="`echo $withval |sed -e 's/^.*://'`" + { $as_echo "$as_me:${as_lineno-$LINENO}: checking checking for Expat includes in $expat_include libs in $expat_ldflags " >&5 +$as_echo_n "checking checking for Expat includes in $expat_include libs in $expat_ldflags ... " >&6; } + ;; + *) + expat_include="$withval/include" + expat_ldflags="$withval/lib" + { $as_echo "$as_me:${as_lineno-$LINENO}: checking checking for Expat includes in $withval" >&5 +$as_echo_n "checking checking for Expat includes in $withval... " >&6; } + ;; + esac + fi + fi + +fi + + +if test "x$expat_base_dir" = "x"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Expat location" >&5 +$as_echo_n "checking for Expat location... 
" >&6; } + if ${ats_cv_expat_dir+:} false; then : + $as_echo_n "(cached) " >&6 +else + + _expat_dir_list="" + case $host_os_def in + darwin) + for dir in "`xcrun -show-sdk-path 2>/dev/null`/usr" /usr/local /usr; do + if test -d $dir && test -f $dir/include/expat.h; then + ats_cv_expat_dir=$dir + break + fi + done + ;; + *) + for dir in /usr/local /usr; do + if test -d $dir && test -f $dir/include/expat.h; then + ats_cv_expat_dir=$dir + break + fi + done + ;; + esac + + unset _expat_dir_list + +fi + + + expat_base_dir=$ats_cv_expat_dir + if test "x$expat_base_dir" = "x"; then + enable_expat=no + { $as_echo "$as_me:${as_lineno-$LINENO}: result: not found" >&5 +$as_echo "not found" >&6; } + else + enable_expat=yes + expat_include="$expat_base_dir/include" + expat_ldflags="$expat_base_dir/lib" + { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${expat_base_dir}" >&5 +$as_echo "${expat_base_dir}" >&6; } + fi +else + if test -d $expat_include && test -d $expat_ldflags && test -f $expat_include/expat.h; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: ok" >&5 +$as_echo "ok" >&6; } + else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: not found" >&5 +$as_echo "not found" >&6; } + fi +fi + +expath=0 +if test "$enable_expat" != "no"; then + saved_ldflags=$LDFLAGS + saved_cppflags=$CPPFLAGS + expat_have_headers=0 + expat_have_libs=0 + if test "$expat_base_dir" != "/usr"; then + + if test "x$CPPFLAGS" = "x"; then + test "x$verbose" = "xyes" && echo " setting CPPFLAGS to \"-I${expat_include}\"" + CPPFLAGS="-I${expat_include}" + else + ats_addto_bugger="-I${expat_include}" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in $CPPFLAGS; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to CPPFLAGS" + CPPFLAGS="$CPPFLAGS $i" + fi + done + fi + + + if test "x$LDFLAGS" = "x"; then + test "x$verbose" = "xyes" && echo " setting LDFLAGS 
to \"-L${expat_ldflags}\"" + LDFLAGS="-L${expat_ldflags}" + else + ats_addto_bugger="-L${expat_ldflags}" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in $LDFLAGS; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to LDFLAGS" + LDFLAGS="$LDFLAGS $i" + fi + done + fi + + + if test "x$LIBTOOL_LINK_FLAGS" = "x"; then + test "x$verbose" = "xyes" && echo " setting LIBTOOL_LINK_FLAGS to \"-R${expat_ldflags}\"" + LIBTOOL_LINK_FLAGS="-R${expat_ldflags}" + else + ats_addto_bugger="-R${expat_ldflags}" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in $LIBTOOL_LINK_FLAGS; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to LIBTOOL_LINK_FLAGS" + LIBTOOL_LINK_FLAGS="$LIBTOOL_LINK_FLAGS $i" + fi + done + fi + + fi + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing XML_SetUserData" >&5 +$as_echo_n "checking for library containing XML_SetUserData... " >&6; } +if ${ac_cv_search_XML_SetUserData+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_func_search_save_LIBS=$LIBS +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char XML_SetUserData (); +int +main () +{ +return XML_SetUserData (); + ; + return 0; +} +_ACEOF +for ac_lib in '' expat; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + if ac_fn_c_try_link "$LINENO"; then : + ac_cv_search_XML_SetUserData=$ac_res +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext + if ${ac_cv_search_XML_SetUserData+:} false; then : + break +fi +done +if ${ac_cv_search_XML_SetUserData+:} false; then : + +else + ac_cv_search_XML_SetUserData=no +fi +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_XML_SetUserData" >&5 +$as_echo "$ac_cv_search_XML_SetUserData" >&6; } +ac_res=$ac_cv_search_XML_SetUserData +if test "$ac_res" != no; then : + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" + expat_have_libs=1 +fi + + if test "$expat_have_libs" != "0"; then + +for ac_header in expat.h +do : + ac_fn_c_check_header_mongrel "$LINENO" "expat.h" "ac_cv_header_expat_h" "$ac_includes_default" +if test "x$ac_cv_header_expat_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_EXPAT_H 1 +_ACEOF + expat_have_headers=1 +fi + +done + +for tsc_i in expat.h +do + ac_safe=`echo "$tsc_i" | sed 'y%./+-%__p_%'` + tsc_2=`echo "$tsc_i" | sed -e 's%/%_%g' -e 's/\.//g' -e 's/-//g'` + if eval "test \"`echo '$ac_cv_header_'$ac_safe`\" = yes"; then + eval "$tsc_2=1" + else + eval "$tsc_2=0" + fi +done + + fi + if test "$expat_have_headers" != "0"; then + enable_xml=yes + + LIBEXPAT="-lexpat" + + +$as_echo "#define HAVE_LIBEXPAT 1" >>confdefs.h + + else + enable_expat=no + CPPFLAGS=$saved_cppflags + LDFLAGS=$saved_ldflags + fi +fi + + + elif test "$withval" = "libxml2" ; then + + enable_libxml2=no + libxml2_include="" + libxml2_ldflags="" + +# Check whether --with-libxml2 was given. 
+if test "${with_libxml2+set}" = set; then : + withval=$with_libxml2; + if test "x$withval" != "xyes" && test "x$withval" != "x"; then + if test "$withval" = "yes"; then + enable_libxml2=yes + libxml2_include="/usr/include/libxml2" + elif test "$withval" != "no"; then + enable_libxml2=yes + libxml2_include="$withval/include/libxml2" + libxml2_ldflags="-L$withval/lib" + fi + fi + +fi + + if test ${enable_libxml2} = "no"; then + enable_libxml2=yes + libxml2_include="/usr/include/libxml2" + fi + if test ${enable_libxml2} != "no"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking libxml2" >&5 +$as_echo_n "checking libxml2... " >&6; } +if ${ts_cv_libxml2+:} false; then : + $as_echo_n "(cached) " >&6 +else + + ts_libxml2_CPPFLAGS=$CPPFLAGS + ts_libxml2_LIBS="$LIBS" + ts_libxml2_LDFLAGS="$LDFLAGS" + CPPFLAGS="$CPPFLAGS -I$libxml2_include" + LDFLAGS="$LDFLAGS $libxml2_ldflags" + LIBS="$LIBS -lxml2" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +int +main () +{ +xmlSAXHandler sax; xmlCreatePushParserCtxt(&sax, NULL, NULL, 0, NULL); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ts_cv_libxml2=yes +else + ts_cv_libxml2=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + CPPFLAGS=$ts_libxml2_CPPFLAGS + LIBS=$ts_libxml2_LIBS + LDFLAGS=$ts_libxml2_LDFLAGS + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ts_cv_libxml2" >&5 +$as_echo "$ts_cv_libxml2" >&6; } + if test $ts_cv_libxml2 = yes ; then + +$as_echo "#define HAVE_LIBXML2 1" >>confdefs.h + + if test -d "$libxml2_include" ; then + + if test "x$CPPFLAGS" = "x"; then + test "x$verbose" = "xyes" && echo " setting CPPFLAGS to \"-I${libxml2_include}\"" + CPPFLAGS="-I${libxml2_include}" + else + ats_addto_bugger="-I${libxml2_include}" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in $CPPFLAGS; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate 
= "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to CPPFLAGS" + CPPFLAGS="$CPPFLAGS $i" + fi + done + fi + + fi + if test -d "$libxml2_ldflags" ; then + + if test "x$LDFLAGS" = "x"; then + test "x$verbose" = "xyes" && echo " setting LDFLAGS to \"-L${libxml2_ldflags}\"" + LDFLAGS="-L${libxml2_ldflags}" + else + ats_addto_bugger="-L${libxml2_ldflags}" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in $LDFLAGS; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to LDFLAGS" + LDFLAGS="$LDFLAGS $i" + fi + done + fi + + + if test "x$LIBTOOL_LINK_FLAGS" = "x"; then + test "x$verbose" = "xyes" && echo " setting LIBTOOL_LINK_FLAGS to \"-R${libxml2_ldflags}\"" + LIBTOOL_LINK_FLAGS="-R${libxml2_ldflags}" + else + ats_addto_bugger="-R${libxml2_ldflags}" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in $LIBTOOL_LINK_FLAGS; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to LIBTOOL_LINK_FLAGS" + LIBTOOL_LINK_FLAGS="$LIBTOOL_LINK_FLAGS $i" + fi + done + fi + + fi + + if test "x$LIBS" = "x"; then + test "x$verbose" = "xyes" && echo " setting LIBS to \"-lxml2\"" + LIBS="-lxml2" + else + ats_addto_bugger="-lxml2" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in $LIBS; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to LIBS" + LIBS="$LIBS $i" + fi + done + fi + + enable_xml=yes + else + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: \"Failed to find libxml2\"" >&5 +$as_echo "$as_me: WARNING: \"Failed to find libxml2\"" >&2;} + fi + fi + + else + as_fn_error $? 
"Unrecognised --with-xml option" "$LINENO" 5 + fi + +else + + + enable_libxml2=no + libxml2_include="" + libxml2_ldflags="" + +# Check whether --with-libxml2 was given. +if test "${with_libxml2+set}" = set; then : + withval=$with_libxml2; + if test "x$withval" != "xyes" && test "x$withval" != "x"; then + if test "$withval" = "yes"; then + enable_libxml2=yes + libxml2_include="/usr/include/libxml2" + elif test "$withval" != "no"; then + enable_libxml2=yes + libxml2_include="$withval/include/libxml2" + libxml2_ldflags="-L$withval/lib" + fi + fi + +fi + + if test ${enable_libxml2} = "no"; then + enable_libxml2=yes + libxml2_include="/usr/include/libxml2" + fi + if test ${enable_libxml2} != "no"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking libxml2" >&5 +$as_echo_n "checking libxml2... " >&6; } +if ${ts_cv_libxml2+:} false; then : + $as_echo_n "(cached) " >&6 +else + + ts_libxml2_CPPFLAGS=$CPPFLAGS + ts_libxml2_LIBS="$LIBS" + ts_libxml2_LDFLAGS="$LDFLAGS" + CPPFLAGS="$CPPFLAGS -I$libxml2_include" + LDFLAGS="$LDFLAGS $libxml2_ldflags" + LIBS="$LIBS -lxml2" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +#include +int +main () +{ +xmlSAXHandler sax; xmlCreatePushParserCtxt(&sax, NULL, NULL, 0, NULL); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ts_cv_libxml2=yes +else + ts_cv_libxml2=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + CPPFLAGS=$ts_libxml2_CPPFLAGS + LIBS=$ts_libxml2_LIBS + LDFLAGS=$ts_libxml2_LDFLAGS + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ts_cv_libxml2" >&5 +$as_echo "$ts_cv_libxml2" >&6; } + if test $ts_cv_libxml2 = yes ; then + +$as_echo "#define HAVE_LIBXML2 1" >>confdefs.h + + if test -d "$libxml2_include" ; then + + if test "x$CPPFLAGS" = "x"; then + test "x$verbose" = "xyes" && echo " setting CPPFLAGS to \"-I${libxml2_include}\"" + CPPFLAGS="-I${libxml2_include}" + else + ats_addto_bugger="-I${libxml2_include}" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in $CPPFLAGS; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to CPPFLAGS" + CPPFLAGS="$CPPFLAGS $i" + fi + done + fi + + fi + if test -d "$libxml2_ldflags" ; then + + if test "x$LDFLAGS" = "x"; then + test "x$verbose" = "xyes" && echo " setting LDFLAGS to \"-L${libxml2_ldflags}\"" + LDFLAGS="-L${libxml2_ldflags}" + else + ats_addto_bugger="-L${libxml2_ldflags}" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in $LDFLAGS; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to LDFLAGS" + LDFLAGS="$LDFLAGS $i" + fi + done + fi + + + if test "x$LIBTOOL_LINK_FLAGS" = "x"; then + test "x$verbose" = "xyes" && echo " setting LIBTOOL_LINK_FLAGS to \"-R${libxml2_ldflags}\"" + LIBTOOL_LINK_FLAGS="-R${libxml2_ldflags}" + else + ats_addto_bugger="-R${libxml2_ldflags}" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j 
in $LIBTOOL_LINK_FLAGS; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to LIBTOOL_LINK_FLAGS" + LIBTOOL_LINK_FLAGS="$LIBTOOL_LINK_FLAGS $i" + fi + done + fi + + fi + + if test "x$LIBS" = "x"; then + test "x$verbose" = "xyes" && echo " setting LIBS to \"-lxml2\"" + LIBS="-lxml2" + else + ats_addto_bugger="-lxml2" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in $LIBS; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to LIBS" + LIBS="$LIBS $i" + fi + done + fi + + enable_xml=yes + else + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: \"Failed to find libxml2\"" >&5 +$as_echo "$as_me: WARNING: \"Failed to find libxml2\"" >&2;} + fi + fi + + if test "$enable_xml" = "no"; then + +enable_expat=no + +# Check whether --with-expat was given. +if test "${with_expat+set}" = set; then : + withval=$with_expat; + if test "x$withval" != "xyes" && test "x$withval" != "x"; then + expat_base_dir="$withval" + if test "$withval" != "no"; then + enable_expat=yes + case "$withval" in + *":"*) + expat_include="`echo $withval |sed -e 's/:.*$//'`" + expat_ldflags="`echo $withval |sed -e 's/^.*://'`" + { $as_echo "$as_me:${as_lineno-$LINENO}: checking checking for Expat includes in $expat_include libs in $expat_ldflags " >&5 +$as_echo_n "checking checking for Expat includes in $expat_include libs in $expat_ldflags ... " >&6; } + ;; + *) + expat_include="$withval/include" + expat_ldflags="$withval/lib" + { $as_echo "$as_me:${as_lineno-$LINENO}: checking checking for Expat includes in $withval" >&5 +$as_echo_n "checking checking for Expat includes in $withval... 
" >&6; } + ;; + esac + fi + fi + +fi + + +if test "x$expat_base_dir" = "x"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Expat location" >&5 +$as_echo_n "checking for Expat location... " >&6; } + if ${ats_cv_expat_dir+:} false; then : + $as_echo_n "(cached) " >&6 +else + + _expat_dir_list="" + case $host_os_def in + darwin) + for dir in "`xcrun -show-sdk-path 2>/dev/null`/usr" /usr/local /usr; do + if test -d $dir && test -f $dir/include/expat.h; then + ats_cv_expat_dir=$dir + break + fi + done + ;; + *) + for dir in /usr/local /usr; do + if test -d $dir && test -f $dir/include/expat.h; then + ats_cv_expat_dir=$dir + break + fi + done + ;; + esac + + unset _expat_dir_list + +fi + + + expat_base_dir=$ats_cv_expat_dir + if test "x$expat_base_dir" = "x"; then + enable_expat=no + { $as_echo "$as_me:${as_lineno-$LINENO}: result: not found" >&5 +$as_echo "not found" >&6; } + else + enable_expat=yes + expat_include="$expat_base_dir/include" + expat_ldflags="$expat_base_dir/lib" + { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${expat_base_dir}" >&5 +$as_echo "${expat_base_dir}" >&6; } + fi +else + if test -d $expat_include && test -d $expat_ldflags && test -f $expat_include/expat.h; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: ok" >&5 +$as_echo "ok" >&6; } + else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: not found" >&5 +$as_echo "not found" >&6; } + fi +fi + +expath=0 +if test "$enable_expat" != "no"; then + saved_ldflags=$LDFLAGS + saved_cppflags=$CPPFLAGS + expat_have_headers=0 + expat_have_libs=0 + if test "$expat_base_dir" != "/usr"; then + + if test "x$CPPFLAGS" = "x"; then + test "x$verbose" = "xyes" && echo " setting CPPFLAGS to \"-I${expat_include}\"" + CPPFLAGS="-I${expat_include}" + else + ats_addto_bugger="-I${expat_include}" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in $CPPFLAGS; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; 
then + test "x$verbose" = "xyes" && echo " adding \"$i\" to CPPFLAGS" + CPPFLAGS="$CPPFLAGS $i" + fi + done + fi + + + if test "x$LDFLAGS" = "x"; then + test "x$verbose" = "xyes" && echo " setting LDFLAGS to \"-L${expat_ldflags}\"" + LDFLAGS="-L${expat_ldflags}" + else + ats_addto_bugger="-L${expat_ldflags}" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in $LDFLAGS; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to LDFLAGS" + LDFLAGS="$LDFLAGS $i" + fi + done + fi + + + if test "x$LIBTOOL_LINK_FLAGS" = "x"; then + test "x$verbose" = "xyes" && echo " setting LIBTOOL_LINK_FLAGS to \"-R${expat_ldflags}\"" + LIBTOOL_LINK_FLAGS="-R${expat_ldflags}" + else + ats_addto_bugger="-R${expat_ldflags}" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in $LIBTOOL_LINK_FLAGS; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to LIBTOOL_LINK_FLAGS" + LIBTOOL_LINK_FLAGS="$LIBTOOL_LINK_FLAGS $i" + fi + done + fi + + fi + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing XML_SetUserData" >&5 +$as_echo_n "checking for library containing XML_SetUserData... " >&6; } +if ${ac_cv_search_XML_SetUserData+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_func_search_save_LIBS=$LIBS +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char XML_SetUserData (); +int +main () +{ +return XML_SetUserData (); + ; + return 0; +} +_ACEOF +for ac_lib in '' expat; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + if ac_fn_c_try_link "$LINENO"; then : + ac_cv_search_XML_SetUserData=$ac_res +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext + if ${ac_cv_search_XML_SetUserData+:} false; then : + break +fi +done +if ${ac_cv_search_XML_SetUserData+:} false; then : + +else + ac_cv_search_XML_SetUserData=no +fi +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_XML_SetUserData" >&5 +$as_echo "$ac_cv_search_XML_SetUserData" >&6; } +ac_res=$ac_cv_search_XML_SetUserData +if test "$ac_res" != no; then : + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" + expat_have_libs=1 +fi + + if test "$expat_have_libs" != "0"; then + +for ac_header in expat.h +do : + ac_fn_c_check_header_mongrel "$LINENO" "expat.h" "ac_cv_header_expat_h" "$ac_includes_default" +if test "x$ac_cv_header_expat_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_EXPAT_H 1 +_ACEOF + expat_have_headers=1 +fi + +done + +for tsc_i in expat.h +do + ac_safe=`echo "$tsc_i" | sed 'y%./+-%__p_%'` + tsc_2=`echo "$tsc_i" | sed -e 's%/%_%g' -e 's/\.//g' -e 's/-//g'` + if eval "test \"`echo '$ac_cv_header_'$ac_safe`\" = yes"; then + eval "$tsc_2=1" + else + eval "$tsc_2=0" + fi +done + + fi + if test "$expat_have_headers" != "0"; then + enable_xml=yes + + LIBEXPAT="-lexpat" + + +$as_echo "#define HAVE_LIBEXPAT 1" >>confdefs.h + + else + enable_expat=no + CPPFLAGS=$saved_cppflags + LDFLAGS=$saved_ldflags + fi +fi + + + fi + +fi + + if test "$enable_xml" = "no"; then + as_fn_error $? "An XML parser (expat or libxml2) is required." "$LINENO" 5 + fi + +if test "x${enable_xml}" != "xyes"; then + as_fn_error $? 
"Need at least one XML library, --with-expat is supported" "$LINENO" 5 +fi + +for ac_func in clock_gettime kqueue epoll_ctl posix_memalign posix_fadvise posix_madvise posix_fallocate inotify_init +do : + as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` +ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" +if eval test \"x\$"$as_ac_var"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 +_ACEOF + +fi +done + +for ac_func in lrand48_r srand48_r port_create strlcpy strlcat sysconf getpagesize +do : + as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` +ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" +if eval test \"x\$"$as_ac_var"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 +_ACEOF + +fi +done + + +# Check for eventfd() and sys/eventfd.h (both must exist ...) + +for ac_header in sys/eventfd.h +do : + ac_fn_c_check_header_mongrel "$LINENO" "sys/eventfd.h" "ac_cv_header_sys_eventfd_h" "$ac_includes_default" +if test "x$ac_cv_header_sys_eventfd_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_SYS_EVENTFD_H 1 +_ACEOF + + if test "x$enable_eventfd" = "xyes"; then : + + for ac_func in eventfd +do : + ac_fn_c_check_func "$LINENO" "eventfd" "ac_cv_func_eventfd" +if test "x$ac_cv_func_eventfd" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_EVENTFD 1 +_ACEOF + +fi +done + + + +fi +fi + +done + +for tsc_i in sys/eventfd.h +do + ac_safe=`echo "$tsc_i" | sed 'y%./+-%__p_%'` + tsc_2=`echo "$tsc_i" | sed -e 's%/%_%g' -e 's/\.//g' -e 's/-//g'` + if eval "test \"`echo '$ac_cv_header_'$ac_safe`\" = yes"; then + eval "$tsc_2=1" + else + eval "$tsc_2=0" + fi +done + + +for ac_func in eventfd +do : + ac_fn_c_check_func "$LINENO" "eventfd" "ac_cv_func_eventfd" +if test "x$ac_cv_func_eventfd" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_EVENTFD 1 +_ACEOF + +fi +done + + +# +# Check for mcheck_pedantic(3) +# + +for ac_header in mcheck.h +do : + 
ac_fn_c_check_header_mongrel "$LINENO" "mcheck.h" "ac_cv_header_mcheck_h" "$ac_includes_default" +if test "x$ac_cv_header_mcheck_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_MCHECK_H 1 +_ACEOF + +fi + +done + +for tsc_i in mcheck.h +do + ac_safe=`echo "$tsc_i" | sed 'y%./+-%__p_%'` + tsc_2=`echo "$tsc_i" | sed -e 's%/%_%g' -e 's/\.//g' -e 's/-//g'` + if eval "test \"`echo '$ac_cv_header_'$ac_safe`\" = yes"; then + eval "$tsc_2=1" + else + eval "$tsc_2=0" + fi +done + +for ac_func in mcheck_pedantic +do : + ac_fn_c_check_func "$LINENO" "mcheck_pedantic" "ac_cv_func_mcheck_pedantic" +if test "x$ac_cv_func_mcheck_pedantic" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_MCHECK_PEDANTIC 1 +_ACEOF + +fi +done + + +# +# Check for pcre library +# + +enable_pcre=no + +# Check whether --with-pcre was given. +if test "${with_pcre+set}" = set; then : + withval=$with_pcre; + if test "x$withval" != "xyes" && test "x$withval" != "x"; then + pcre_base_dir="$withval" + if test "$withval" != "no"; then + enable_pcre=yes + case "$withval" in + *":"*) + pcre_include="`echo $withval |sed -e 's/:.*$//'`" + pcre_ldflags="`echo $withval |sed -e 's/^.*://'`" + { $as_echo "$as_me:${as_lineno-$LINENO}: checking checking for pcre includes in $pcre_include libs in $pcre_ldflags " >&5 +$as_echo_n "checking checking for pcre includes in $pcre_include libs in $pcre_ldflags ... " >&6; } + ;; + *) + pcre_include="$withval/include" + pcre_ldflags="$withval/lib" + { $as_echo "$as_me:${as_lineno-$LINENO}: checking checking for pcre includes in $withval" >&5 +$as_echo_n "checking checking for pcre includes in $withval... " >&6; } + ;; + esac + fi + fi + +else + + # Extract the first word of "pcre-config", so it can be a program name with args. +set dummy pcre-config; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... 
" >&6; } +if ${ac_cv_prog_PCRE_CONFIG+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$PCRE_CONFIG"; then + ac_cv_prog_PCRE_CONFIG="$PCRE_CONFIG" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_PCRE_CONFIG="pcre-config" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +PCRE_CONFIG=$ac_cv_prog_PCRE_CONFIG +if test -n "$PCRE_CONFIG"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $PCRE_CONFIG" >&5 +$as_echo "$PCRE_CONFIG" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + if test "x$PCRE_CONFIG" != "x"; then + enable_pcre=yes + pcre_base_dir="`$PCRE_CONFIG --prefix`" + pcre_include="`$PCRE_CONFIG --cflags | sed -es/-I//`" + pcre_ldflags="`$PCRE_CONFIG --libs | sed -es/-lpcre// -es/-L//`" + fi + +fi + + +if test "x$pcre_base_dir" = "x"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for pcre location" >&5 +$as_echo_n "checking for pcre location... 
" >&6; } + if ${ats_cv_pcre_dir+:} false; then : + $as_echo_n "(cached) " >&6 +else + + for dir in /usr/local /usr ; do + if test -d $dir && ( test -f $dir/include/pcre.h || test -f $dir/include/pcre/pcre.h ); then + ats_cv_pcre_dir=$dir + break + fi + done + +fi + + pcre_base_dir=$ats_cv_pcre_dir + if test "x$pcre_base_dir" = "x"; then + enable_pcre=no + { $as_echo "$as_me:${as_lineno-$LINENO}: result: not found" >&5 +$as_echo "not found" >&6; } + else + enable_pcre=yes + pcre_include="$pcre_base_dir/include" + pcre_ldflags="$pcre_base_dir/lib" + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $pcre_base_dir" >&5 +$as_echo "$pcre_base_dir" >&6; } + fi +else + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for pcre headers in $pcre_include" >&5 +$as_echo_n "checking for pcre headers in $pcre_include... " >&6; } + if test -d $pcre_include && test -d $pcre_ldflags && ( test -f $pcre_include/pcre.h || test -f $pcre_include/pcre/pcre.h ); then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: ok" >&5 +$as_echo "ok" >&6; } + else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: not found" >&5 +$as_echo "not found" >&6; } + fi +fi + +pcreh=0 +pcre_pcreh=0 +if test "$enable_pcre" != "no"; then + saved_ldflags=$LDFLAGS + saved_cppflags=$CPPFLAGS + pcre_have_headers=0 + pcre_have_libs=0 + if test "$pcre_base_dir" != "/usr"; then + + if test "x$CPPFLAGS" = "x"; then + test "x$verbose" = "xyes" && echo " setting CPPFLAGS to \"-I${pcre_include}\"" + CPPFLAGS="-I${pcre_include}" + else + ats_addto_bugger="-I${pcre_include}" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in $CPPFLAGS; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to CPPFLAGS" + CPPFLAGS="$CPPFLAGS $i" + fi + done + fi + + + if test "x$LDFLAGS" = "x"; then + test "x$verbose" = "xyes" && echo " setting LDFLAGS to \"-L${pcre_ldflags}\"" + 
LDFLAGS="-L${pcre_ldflags}" + else + ats_addto_bugger="-L${pcre_ldflags}" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in $LDFLAGS; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to LDFLAGS" + LDFLAGS="$LDFLAGS $i" + fi + done + fi + + + if test "x$LIBTOOL_LINK_FLAGS" = "x"; then + test "x$verbose" = "xyes" && echo " setting LIBTOOL_LINK_FLAGS to \"-R${pcre_ldflags}\"" + LIBTOOL_LINK_FLAGS="-R${pcre_ldflags}" + else + ats_addto_bugger="-R${pcre_ldflags}" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in $LIBTOOL_LINK_FLAGS; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to LIBTOOL_LINK_FLAGS" + LIBTOOL_LINK_FLAGS="$LIBTOOL_LINK_FLAGS $i" + fi + done + fi + + fi + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing pcre_exec" >&5 +$as_echo_n "checking for library containing pcre_exec... " >&6; } +if ${ac_cv_search_pcre_exec+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_func_search_save_LIBS=$LIBS +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char pcre_exec (); +int +main () +{ +return pcre_exec (); + ; + return 0; +} +_ACEOF +for ac_lib in '' pcre; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + if ac_fn_c_try_link "$LINENO"; then : + ac_cv_search_pcre_exec=$ac_res +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext + if ${ac_cv_search_pcre_exec+:} false; then : + break +fi +done +if ${ac_cv_search_pcre_exec+:} false; then : + +else + ac_cv_search_pcre_exec=no +fi +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_pcre_exec" >&5 +$as_echo "$ac_cv_search_pcre_exec" >&6; } +ac_res=$ac_cv_search_pcre_exec +if test "$ac_res" != no; then : + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" + pcre_have_libs=1 +fi + + if test "$pcre_have_libs" != "0"; then + +for ac_header in pcre.h +do : + ac_fn_c_check_header_mongrel "$LINENO" "pcre.h" "ac_cv_header_pcre_h" "$ac_includes_default" +if test "x$ac_cv_header_pcre_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_PCRE_H 1 +_ACEOF + pcre_have_headers=1 +fi + +done + +for tsc_i in pcre.h +do + ac_safe=`echo "$tsc_i" | sed 'y%./+-%__p_%'` + tsc_2=`echo "$tsc_i" | sed -e 's%/%_%g' -e 's/\.//g' -e 's/-//g'` + if eval "test \"`echo '$ac_cv_header_'$ac_safe`\" = yes"; then + eval "$tsc_2=1" + else + eval "$tsc_2=0" + fi +done + + +for ac_header in pcre/pcre.h +do : + ac_fn_c_check_header_mongrel "$LINENO" "pcre/pcre.h" "ac_cv_header_pcre_pcre_h" "$ac_includes_default" +if test "x$ac_cv_header_pcre_pcre_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_PCRE_PCRE_H 1 +_ACEOF + pcre_have_headers=1 +fi + +done + +for tsc_i in pcre/pcre.h +do + ac_safe=`echo "$tsc_i" | sed 'y%./+-%__p_%'` + tsc_2=`echo "$tsc_i" | sed -e 's%/%_%g' -e 's/\.//g' -e 's/-//g'` + if eval "test \"`echo '$ac_cv_header_'$ac_safe`\" = yes"; then + eval 
"$tsc_2=1" + else + eval "$tsc_2=0" + fi +done + + fi + if test "$pcre_have_headers" != "0"; then + +$as_echo "#define HAVE_LIBPCRE 1" >>confdefs.h + + LIBPCRE=-lpcre + + else + enable_pcre=no + CPPFLAGS=$saved_cppflags + LDFLAGS=$saved_ldflags + fi +fi + + + +if test "x${enable_pcre}" != "xyes"; then + as_fn_error $? "Cannot find pcre library. Configure --with-pcre=DIR" "$LINENO" 5 +fi + +has_backtrace=0 +# Check for backtrace() support + +for ac_header in execinfo.h +do : + ac_fn_c_check_header_mongrel "$LINENO" "execinfo.h" "ac_cv_header_execinfo_h" "$ac_includes_default" +if test "x$ac_cv_header_execinfo_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_EXECINFO_H 1 +_ACEOF + has_backtrace=1 +fi + +done + +for tsc_i in execinfo.h +do + ac_safe=`echo "$tsc_i" | sed 'y%./+-%__p_%'` + tsc_2=`echo "$tsc_i" | sed -e 's%/%_%g' -e 's/\.//g' -e 's/-//g'` + if eval "test \"`echo '$ac_cv_header_'$ac_safe`\" = yes"; then + eval "$tsc_2=1" + else + eval "$tsc_2=0" + fi +done + +if test "${has_backtrace}" = "1"; then + # FreeBSD requires '/usr/ports/devel/libexecinfo' for gdb style backtrace() support + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing backtrace" >&5 +$as_echo_n "checking for library containing backtrace... " >&6; } +if ${ac_cv_search_backtrace+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_func_search_save_LIBS=$LIBS +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char backtrace (); +int +main () +{ +return backtrace (); + ; + return 0; +} +_ACEOF +for ac_lib in '' execinfo; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + if ac_fn_c_try_link "$LINENO"; then : + ac_cv_search_backtrace=$ac_res +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext + if ${ac_cv_search_backtrace+:} false; then : + break +fi +done +if ${ac_cv_search_backtrace+:} false; then : + +else + ac_cv_search_backtrace=no +fi +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_backtrace" >&5 +$as_echo "$ac_cv_search_backtrace" >&6; } +ac_res=$ac_cv_search_backtrace +if test "$ac_res" != no; then : + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" + have_backtrace_lib=yes +fi + +else + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: No backtrace() support found" >&5 +$as_echo "$as_me: WARNING: No backtrace() support found" >&2;} +fi + + + +use_epoll=0 +use_kqueue=0 +use_port=0 +if test "$ac_cv_func_epoll_ctl" = "yes"; then + use_epoll=1 + have_good_poller=1 + { $as_echo "$as_me:${as_lineno-$LINENO}: Using epoll event interface" >&5 +$as_echo "$as_me: Using epoll event interface" >&6;} +elif test "$ac_cv_func_kqueue" = "yes"; then + use_kqueue=1 + have_good_poller=1 + { $as_echo "$as_me:${as_lineno-$LINENO}: Using kqueue event interface" >&5 +$as_echo "$as_me: Using kqueue event interface" >&6;} +elif test "$ac_cv_func_port_create" = "yes"; then + use_port=1 + have_good_poller=1 + { $as_echo "$as_me:${as_lineno-$LINENO}: Using port event interface" >&5 +$as_echo "$as_me: Using port event interface" >&6;} +else + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? 
"No suitable polling interface found +See \`config.log' for more details" "$LINENO" 5; } +fi + + + + +has_profiler=0 +if test "x${with_profiler}" = "xyes"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing ProfilerStart" >&5 +$as_echo_n "checking for library containing ProfilerStart... " >&6; } +if ${ac_cv_search_ProfilerStart+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_func_search_save_LIBS=$LIBS +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char ProfilerStart (); +int +main () +{ +return ProfilerStart (); + ; + return 0; +} +_ACEOF +for ac_lib in '' profiler; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + if ac_fn_c_try_link "$LINENO"; then : + ac_cv_search_ProfilerStart=$ac_res +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext + if ${ac_cv_search_ProfilerStart+:} false; then : + break +fi +done +if ${ac_cv_search_ProfilerStart+:} false; then : + +else + ac_cv_search_ProfilerStart=no +fi +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_ProfilerStart" >&5 +$as_echo "$ac_cv_search_ProfilerStart" >&6; } +ac_res=$ac_cv_search_ProfilerStart +if test "$ac_res" != no; then : + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" + LIBPROFILER="-lprofiler" + + has_profiler=1 + +else + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "check for profiler failed. Have you installed google-perftools-devel? 
+See \`config.log' for more details" "$LINENO" 5; } +fi + +fi + + +# For SunPro 5.2 - we need the demangle symbol for +# ink_stack_trace.cc in lib/ts. Because this +# library is part of the SunPro distribution, we need +# check with the C++ compiler since we might be using +# SunPro CC for the C++ compiler and gcc for the C compiler +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing cplus_demangle" >&5 +$as_echo_n "checking for library containing cplus_demangle... " >&6; } +if ${ac_cv_search_cplus_demangle+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_func_search_save_LIBS=$LIBS +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char cplus_demangle (); +int +main () +{ +return cplus_demangle (); + ; + return 0; +} +_ACEOF +for ac_lib in '' demangle; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + if ac_fn_cxx_try_link "$LINENO"; then : + ac_cv_search_cplus_demangle=$ac_res +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext + if ${ac_cv_search_cplus_demangle+:} false; then : + break +fi +done +if ${ac_cv_search_cplus_demangle+:} false; then : + +else + ac_cv_search_cplus_demangle=no +fi +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_cplus_demangle" >&5 +$as_echo "$ac_cv_search_cplus_demangle" >&6; } +ac_res=$ac_cv_search_cplus_demangle +if test "$ac_res" != no; then : + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" + LIBDEMANGLE="-ldemangle" + + +fi + + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for 128bit CAS support" >&5 +$as_echo_n "checking for 128bit CAS support... " >&6; } +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + + +# We need to save and restore compiler flags around this whole block. +# TS_TRY_COMPILE_NO_WARNING will save and restore flags, so if we do that in the +# middle, then we can accidentally restore modified flags. 
+__saved_CXXFLAGS="${CXXFLAGS}" +__saved_CFLAGS="${CFLAGS}" + +has_128bit_cas=0 + +ats_save_CFLAGS=$CFLAGS + CFLAGS="$CFLAGS $CFLAGS_WARN" + if test "$ac_cv_c_compiler_gnu" = "yes"; then + CFLAGS="$CFLAGS -Werror" + fi + CFLAGS=$(echo $CFLAGS | sed -e 's/^-w$//' -e 's/^-w //' -e 's/ -w$//' -e 's/ -w / /') + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + __int128_t x = 0; + __sync_bool_compare_and_swap(&x,0,10); + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_link "$LINENO"; then : + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + has_128bit_cas=1 + +else + + + if test "x$CXXFLAGS" = "x"; then + test "x$verbose" = "xyes" && echo " setting CXXFLAGS to \"-mcx16\"" + CXXFLAGS="-mcx16" + else + ats_addto_bugger="-mcx16" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in $CXXFLAGS; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to CXXFLAGS" + CXXFLAGS="$CXXFLAGS $i" + fi + done + fi + + + if test "x$CFLAGS" = "x"; then + test "x$verbose" = "xyes" && echo " setting CFLAGS to \"-mcx16\"" + CFLAGS="-mcx16" + else + ats_addto_bugger="-mcx16" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in $CFLAGS; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to CFLAGS" + CFLAGS="$CFLAGS $i" + fi + done + fi + + ats_save_CFLAGS=$CFLAGS + CFLAGS="$CFLAGS $CFLAGS_WARN" + if test "$ac_cv_c_compiler_gnu" = "yes"; then + CFLAGS="$CFLAGS -Werror" + fi + CFLAGS=$(echo $CFLAGS | sed -e 's/^-w$//' -e 's/^-w //' -e 's/ -w$//' -e 's/ -w / /') + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + +int +main () +{ + + __int128_t x = 0; + __sync_bool_compare_and_swap(&x,0,10); + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_link "$LINENO"; then : + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + has_128bit_cas=1 + +else + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + CFLAGS=$ats_save_CFLAGS + + +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + CFLAGS=$ats_save_CFLAGS + + +CXXFLAGS="${__saved_CXXFLAGS}" +CFLAGS="${__saved_CFLAGS}" +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + + +if test "x$has_128bit_cas" = "x1"; then : + + + if test "x$CFLAGS" = "x"; then + test "x$verbose" = "xyes" && echo " setting CFLAGS to \"-mcx16\"" + CFLAGS="-mcx16" + else + ats_addto_bugger="-mcx16" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in $CFLAGS; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to CFLAGS" + CFLAGS="$CFLAGS $i" + fi + done + fi + + + if test "x$CXXFLAGS" = "x"; then + test "x$verbose" = "xyes" && echo " setting CXXFLAGS to \"-mcx16\"" + CXXFLAGS="-mcx16" + else + ats_addto_bugger="-mcx16" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in $CXXFLAGS; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to CXXFLAGS" + CXXFLAGS="$CXXFLAGS $i" + fi + done + fi + + +fi + +# Check for POSIX capabilities library. +# If we don't find it, disable checking for header. 
+use_posix_cap=0 +if test "x$enable_posix_cap" != "xno"; then : + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing cap_set_proc" >&5 +$as_echo_n "checking for library containing cap_set_proc... " >&6; } +if ${ac_cv_search_cap_set_proc+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_func_search_save_LIBS=$LIBS +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char cap_set_proc (); +int +main () +{ +return cap_set_proc (); + ; + return 0; +} +_ACEOF +for ac_lib in '' cap; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + if ac_fn_c_try_link "$LINENO"; then : + ac_cv_search_cap_set_proc=$ac_res +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext + if ${ac_cv_search_cap_set_proc+:} false; then : + break +fi +done +if ${ac_cv_search_cap_set_proc+:} false; then : + +else + ac_cv_search_cap_set_proc=no +fi +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_cap_set_proc" >&5 +$as_echo "$ac_cv_search_cap_set_proc" >&6; } +ac_res=$ac_cv_search_cap_set_proc +if test "$ac_res" != no; then : + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" + LIBCAP="-lcap" + + use_posix_cap=1 + +else + + if test "x$enable_posix_cap" == "xyes"; then : + + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "POSIX capabilities enabled but system library not found. 
+See \`config.log' for more details" "$LINENO" 5; } + +else + + enable_posix_cap=no + +fi + + +fi + + +fi + + +# +# If the OS is linux, we can use the '--enable-linux-native-aio' option to +# replace the aio thread mode. Effective only on the linux system. +# + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to enable Linux native AIO" >&5 +$as_echo_n "checking whether to enable Linux native AIO... " >&6; } +# Check whether --enable-linux-native-aio was given. +if test "${enable_linux_native_aio+set}" = set; then : + enableval=$enable_linux_native_aio; enable_linux_native_aio="${enableval}" +else + enable_linux_native_aio=no + +fi + + +if test "x$enable_linux_native_aio" = "xyes"; then : + + if test $host_os_def != "linux"; then + as_fn_error $? "Linux native AIO can only be enabled on Linux systems" "$LINENO" 5 + fi + + for ac_header in libaio.h +do : + ac_fn_c_check_header_mongrel "$LINENO" "libaio.h" "ac_cv_header_libaio_h" "$ac_includes_default" +if test "x$ac_cv_header_libaio_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_LIBAIO_H 1 +_ACEOF + +else + as_fn_error $? "Linux native AIO requires libaio.h" "$LINENO" 5 + +fi + +done + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing io_submit" >&5 +$as_echo_n "checking for library containing io_submit... " >&6; } +if ${ac_cv_search_io_submit+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_func_search_save_LIBS=$LIBS +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char io_submit (); +int +main () +{ +return io_submit (); + ; + return 0; +} +_ACEOF +for ac_lib in '' aio; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + if ac_fn_c_try_link "$LINENO"; then : + ac_cv_search_io_submit=$ac_res +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext + if ${ac_cv_search_io_submit+:} false; then : + break +fi +done +if ${ac_cv_search_io_submit+:} false; then : + +else + ac_cv_search_io_submit=no +fi +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_io_submit" >&5 +$as_echo "$ac_cv_search_io_submit" >&6; } +ac_res=$ac_cv_search_io_submit +if test "$ac_res" != no; then : + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" + +else + as_fn_error $? "Linux native AIO requires libaio" "$LINENO" 5 + +fi + + + +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_linux_native_aio" >&5 +$as_echo "$enable_linux_native_aio" >&6; } + + tsl_prefix="use" + tsl_stem="linux_native_aio" + eval "tsl_enable=\$enable_${tsl_stem}" + if test "x$tsl_enable" = "xyes"; then : + eval "${tsl_prefix}_${tsl_stem}=1" +else + eval "${tsl_prefix}_${tsl_stem}=0" + +fi + + + +# Check for hwloc library. +# If we don't find it, disable checking for header. +use_hwloc=0 + + + + + + + +if test "x$ac_cv_env_PKG_CONFIG_set" != "xset"; then + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}pkg-config", so it can be a program name with args. +set dummy ${ac_tool_prefix}pkg-config; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_path_PKG_CONFIG+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $PKG_CONFIG in + [\\/]* | ?:[\\/]*) + ac_cv_path_PKG_CONFIG="$PKG_CONFIG" # Let the user override the test with a path. 
+ ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_path_PKG_CONFIG="$as_dir/$ac_word$ac_exec_ext" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + ;; +esac +fi +PKG_CONFIG=$ac_cv_path_PKG_CONFIG +if test -n "$PKG_CONFIG"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $PKG_CONFIG" >&5 +$as_echo "$PKG_CONFIG" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_path_PKG_CONFIG"; then + ac_pt_PKG_CONFIG=$PKG_CONFIG + # Extract the first word of "pkg-config", so it can be a program name with args. +set dummy pkg-config; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_path_ac_pt_PKG_CONFIG+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $ac_pt_PKG_CONFIG in + [\\/]* | ?:[\\/]*) + ac_cv_path_ac_pt_PKG_CONFIG="$ac_pt_PKG_CONFIG" # Let the user override the test with a path. + ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_path_ac_pt_PKG_CONFIG="$as_dir/$ac_word$ac_exec_ext" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + ;; +esac +fi +ac_pt_PKG_CONFIG=$ac_cv_path_ac_pt_PKG_CONFIG +if test -n "$ac_pt_PKG_CONFIG"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_pt_PKG_CONFIG" >&5 +$as_echo "$ac_pt_PKG_CONFIG" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_pt_PKG_CONFIG" = x; then + PKG_CONFIG="" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + PKG_CONFIG=$ac_pt_PKG_CONFIG + fi +else + PKG_CONFIG="$ac_cv_path_PKG_CONFIG" +fi + +fi +if test -n "$PKG_CONFIG"; then + _pkg_min_version=0.9.0 + { $as_echo "$as_me:${as_lineno-$LINENO}: checking pkg-config is at least version $_pkg_min_version" >&5 +$as_echo_n "checking pkg-config is at least version $_pkg_min_version... " >&6; } + if $PKG_CONFIG --atleast-pkgconfig-version $_pkg_min_version; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + PKG_CONFIG="" + fi +fi +if test "x$enable_hwloc" = "xyes"; then : + + # Use pkg-config, because some distros (*cough* Ubuntu) put hwloc in unusual places. + +pkg_failed=no +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for HWLOC" >&5 +$as_echo_n "checking for HWLOC... 
" >&6; } + +if test -n "$HWLOC_CFLAGS"; then + pkg_cv_HWLOC_CFLAGS="$HWLOC_CFLAGS" + elif test -n "$PKG_CONFIG"; then + if test -n "$PKG_CONFIG" && \ + { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"hwloc\""; } >&5 + ($PKG_CONFIG --exists --print-errors "hwloc") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + pkg_cv_HWLOC_CFLAGS=`$PKG_CONFIG --cflags "hwloc" 2>/dev/null` + test "x$?" != "x0" && pkg_failed=yes +else + pkg_failed=yes +fi + else + pkg_failed=untried +fi +if test -n "$HWLOC_LIBS"; then + pkg_cv_HWLOC_LIBS="$HWLOC_LIBS" + elif test -n "$PKG_CONFIG"; then + if test -n "$PKG_CONFIG" && \ + { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"hwloc\""; } >&5 + ($PKG_CONFIG --exists --print-errors "hwloc") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + pkg_cv_HWLOC_LIBS=`$PKG_CONFIG --libs "hwloc" 2>/dev/null` + test "x$?" 
!= "x0" && pkg_failed=yes +else + pkg_failed=yes +fi + else + pkg_failed=untried +fi + + + +if test $pkg_failed = yes; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + +if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then + _pkg_short_errors_supported=yes +else + _pkg_short_errors_supported=no +fi + if test $_pkg_short_errors_supported = yes; then + HWLOC_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "hwloc" 2>&1` + else + HWLOC_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "hwloc" 2>&1` + fi + # Put the nasty error message in config.log where it belongs + echo "$HWLOC_PKG_ERRORS" >&5 + + + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: hwloc not found, try --disable-hwloc" >&5 +$as_echo "$as_me: WARNING: hwloc not found, try --disable-hwloc" >&2;} + HWLOC_CFLAGS="" + + HWLOC_LIBS="" + + +elif test $pkg_failed = untried; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: hwloc not found, try --disable-hwloc" >&5 +$as_echo "$as_me: WARNING: hwloc not found, try --disable-hwloc" >&2;} + HWLOC_CFLAGS="" + + HWLOC_LIBS="" + + +else + HWLOC_CFLAGS=$pkg_cv_HWLOC_CFLAGS + HWLOC_LIBS=$pkg_cv_HWLOC_LIBS + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + + SAVE_LIBS="$LIBS" + LIBS="-lhwloc" + ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for hwloc C++ linking" >&5 +$as_echo_n "checking for hwloc C++ linking... " >&6; } + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + + #include +int +main () +{ +hwloc_topology_t t; hwloc_topology_init(&t); hwloc_get_type_depth(t, HWLOC_OBJ_SOCKET); + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_link "$LINENO"; then : + + use_hwloc=1 + + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + # Old versions of libhwloc don't have HWLOC_OBJ_PU. + ac_fn_cxx_check_decl "$LINENO" "HWLOC_OBJ_PU" "ac_cv_have_decl_HWLOC_OBJ_PU" "#include + +" +if test "x$ac_cv_have_decl_HWLOC_OBJ_PU" = xyes; then : + +$as_echo "#define HAVE_HWLOC_OBJ_PU 1" >>confdefs.h + +fi + + +else + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: hwloc not linkable, try --disable-hwloc" >&5 +$as_echo "$as_me: WARNING: hwloc not linkable, try --disable-hwloc" >&2;} + HWLOC_CFLAGS="" + + HWLOC_LIBS="" + + +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + LIBS="$SAVE_LIBS" + +fi + +fi + + + +# +# Check for Maxmind APIs / includes. TODO: Long term, it might make sense to support +# GeoIP as a "helper" plugin, which other plugins can then use. Such a plugin could +# then manage which libraries to use via explicit dlopen()'s. +# +enable_maxmind_geoip=no + + __saved_LIBS="$LIBS" + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing GeoIP_id_by_code" >&5 +$as_echo_n "checking for library containing GeoIP_id_by_code... " >&6; } +if ${ac_cv_search_GeoIP_id_by_code+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_func_search_save_LIBS=$LIBS +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. 
+ Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char GeoIP_id_by_code (); +int +main () +{ +return GeoIP_id_by_code (); + ; + return 0; +} +_ACEOF +for ac_lib in '' GeoIP; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + if ac_fn_c_try_link "$LINENO"; then : + ac_cv_search_GeoIP_id_by_code=$ac_res +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext + if ${ac_cv_search_GeoIP_id_by_code+:} false; then : + break +fi +done +if ${ac_cv_search_GeoIP_id_by_code+:} false; then : + +else + ac_cv_search_GeoIP_id_by_code=no +fi +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_GeoIP_id_by_code" >&5 +$as_echo "$ac_cv_search_GeoIP_id_by_code" >&6; } +ac_res=$ac_cv_search_GeoIP_id_by_code +if test "$ac_res" != no; then : + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" + + case $ac_cv_search_GeoIP_id_by_code in + "none required"|"no") ts_cv_search_GeoIP_id_by_code="" ;; + *) ts_cv_search_GeoIP_id_by_code=$ac_cv_search_GeoIP_id_by_code ;; + esac + + GEOIP_LIBS=$ts_cv_search_GeoIP_id_by_code + +for ac_header in GeoIP.h +do : + ac_fn_c_check_header_mongrel "$LINENO" "GeoIP.h" "ac_cv_header_GeoIP_h" "$ac_includes_default" +if test "x$ac_cv_header_GeoIP_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_GEOIP_H 1 +_ACEOF + enable_maxmind_geoip=yes +fi + +done + +for tsc_i in GeoIP.h +do + ac_safe=`echo "$tsc_i" | sed 'y%./+-%__p_%'` + tsc_2=`echo "$tsc_i" | sed -e 's%/%_%g' -e 's/\.//g' -e 's/-//g'` + if eval "test \"`echo '$ac_cv_header_'$ac_safe`\" = yes"; then + eval "$tsc_2=1" + else + eval "$tsc_2=0" + fi +done + + + +else + + true + +fi + + + LIBS="$__saved_LIBS" + unset __saved_LIBS + + + + if test "x${enable_maxmind_geoip}" = x"yes" ; then + BUILD_GEOIP_PLUGIN_TRUE= + 
BUILD_GEOIP_PLUGIN_FALSE='#' +else + BUILD_GEOIP_PLUGIN_TRUE='#' + BUILD_GEOIP_PLUGIN_FALSE= +fi + + +# Right now, the healthcheck plugins requires inotify_init (and friends) + if test "$ac_cv_func_inotify_init" = "yes" ; then + BUILD_HEALTHCHECK_PLUGIN_TRUE= + BUILD_HEALTHCHECK_PLUGIN_FALSE='#' +else + BUILD_HEALTHCHECK_PLUGIN_TRUE='#' + BUILD_HEALTHCHECK_PLUGIN_FALSE= +fi + + +# +# Check for tcmalloc and jemalloc + +enable_jemalloc=no + +# Check whether --with-jemalloc was given. +if test "${with_jemalloc+set}" = set; then : + withval=$with_jemalloc; + if test "$withval" != "no"; then + if test "x${enable_tcmalloc}" = "xyes"; then + as_fn_error $? "Cannot compile with both jemalloc and tcmalloc" "$LINENO" 5 + fi + enable_jemalloc=yes + jemalloc_base_dir="$withval" + case "$withval" in + yes) + jemalloc_base_dir="/usr" + { $as_echo "$as_me:${as_lineno-$LINENO}: checking checking for jemalloc includes standard directories" >&5 +$as_echo_n "checking checking for jemalloc includes standard directories... " >&6; } + ;; + *":"*) + jemalloc_include="`echo $withval |sed -e 's/:.*$//'`" + jemalloc_ldflags="`echo $withval |sed -e 's/^.*://'`" + { $as_echo "$as_me:${as_lineno-$LINENO}: checking checking for jemalloc includes in $jemalloc_include libs in $jemalloc_ldflags" >&5 +$as_echo_n "checking checking for jemalloc includes in $jemalloc_include libs in $jemalloc_ldflags... " >&6; } + ;; + *) + jemalloc_include="$withval/include" + jemalloc_ldflags="$withval/lib" + { $as_echo "$as_me:${as_lineno-$LINENO}: checking checking for jemalloc includes in $withval" >&5 +$as_echo_n "checking checking for jemalloc includes in $withval... 
" >&6; } + ;; + esac + fi + +fi + + +jemalloch=0 +if test "$enable_jemalloc" != "no"; then + saved_ldflags=$LDFLAGS + saved_cppflags=$CPPFLAGS + jemalloc_have_headers=0 + jemalloc_have_libs=0 + if test "$jemalloc_base_dir" != "/usr"; then + + if test "x$CPPFLAGS" = "x"; then + test "x$verbose" = "xyes" && echo " setting CPPFLAGS to \"-I${jemalloc_include}\"" + CPPFLAGS="-I${jemalloc_include}" + else + ats_addto_bugger="-I${jemalloc_include}" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in $CPPFLAGS; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to CPPFLAGS" + CPPFLAGS="$CPPFLAGS $i" + fi + done + fi + + + if test "x$LDFLAGS" = "x"; then + test "x$verbose" = "xyes" && echo " setting LDFLAGS to \"-L${jemalloc_ldflags}\"" + LDFLAGS="-L${jemalloc_ldflags}" + else + ats_addto_bugger="-L${jemalloc_ldflags}" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in $LDFLAGS; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to LDFLAGS" + LDFLAGS="$LDFLAGS $i" + fi + done + fi + + + if test "x$LIBTOOL_LINK_FLAGS" = "x"; then + test "x$verbose" = "xyes" && echo " setting LIBTOOL_LINK_FLAGS to \"-R${jemalloc_ldflags}\"" + LIBTOOL_LINK_FLAGS="-R${jemalloc_ldflags}" + else + ats_addto_bugger="-R${jemalloc_ldflags}" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in $LIBTOOL_LINK_FLAGS; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to LIBTOOL_LINK_FLAGS" + LIBTOOL_LINK_FLAGS="$LIBTOOL_LINK_FLAGS $i" + fi + done + fi + + fi + # On Darwin, jemalloc symbols are prefixed with je_. Search for that first, then fall back + # to unadorned symbols. 
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing je_malloc_stats_print" >&5 +$as_echo_n "checking for library containing je_malloc_stats_print... " >&6; } +if ${ac_cv_search_je_malloc_stats_print+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_func_search_save_LIBS=$LIBS +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char je_malloc_stats_print (); +int +main () +{ +return je_malloc_stats_print (); + ; + return 0; +} +_ACEOF +for ac_lib in '' jemalloc; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + if ac_fn_c_try_link "$LINENO"; then : + ac_cv_search_je_malloc_stats_print=$ac_res +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext + if ${ac_cv_search_je_malloc_stats_print+:} false; then : + break +fi +done +if ${ac_cv_search_je_malloc_stats_print+:} false; then : + +else + ac_cv_search_je_malloc_stats_print=no +fi +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_je_malloc_stats_print" >&5 +$as_echo "$ac_cv_search_je_malloc_stats_print" >&6; } +ac_res=$ac_cv_search_je_malloc_stats_print +if test "$ac_res" != no; then : + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" + jemalloc_have_libs=1 +else + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing malloc_stats_print" >&5 +$as_echo_n "checking for library containing malloc_stats_print... " >&6; } +if ${ac_cv_search_malloc_stats_print+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_func_search_save_LIBS=$LIBS +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char malloc_stats_print (); +int +main () +{ +return malloc_stats_print (); + ; + return 0; +} +_ACEOF +for ac_lib in '' jemalloc; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + if ac_fn_c_try_link "$LINENO"; then : + ac_cv_search_malloc_stats_print=$ac_res +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext + if ${ac_cv_search_malloc_stats_print+:} false; then : + break +fi +done +if ${ac_cv_search_malloc_stats_print+:} false; then : + +else + ac_cv_search_malloc_stats_print=no +fi +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_malloc_stats_print" >&5 +$as_echo "$ac_cv_search_malloc_stats_print" >&6; } +ac_res=$ac_cv_search_malloc_stats_print +if test "$ac_res" != no; then : + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" + jemalloc_have_libs=1 +fi + + +fi + + if test "$jemalloc_have_libs" != "0"; then + +for ac_header in jemalloc/jemalloc.h +do : + ac_fn_c_check_header_mongrel "$LINENO" "jemalloc/jemalloc.h" "ac_cv_header_jemalloc_jemalloc_h" "$ac_includes_default" +if test "x$ac_cv_header_jemalloc_jemalloc_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_JEMALLOC_JEMALLOC_H 1 +_ACEOF + jemalloc_have_headers=1 +fi + +done + +for tsc_i in jemalloc/jemalloc.h +do + ac_safe=`echo "$tsc_i" | sed 'y%./+-%__p_%'` + tsc_2=`echo "$tsc_i" | sed -e 's%/%_%g' -e 's/\.//g' -e 's/-//g'` + if eval "test \"`echo '$ac_cv_header_'$ac_safe`\" = yes"; then + eval "$tsc_2=1" + else + eval "$tsc_2=0" + fi +done + + fi + if test "$jemalloc_have_headers" != "0"; then + jemalloch=1 + + if test "x$LIBS" = "x"; then + test "x$verbose" = "xyes" && echo " setting 
LIBS to \"-ljemalloc\"" + LIBS="-ljemalloc" + else + ats_addto_bugger="-ljemalloc" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in $LIBS; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to LIBS" + LIBS="$LIBS $i" + fi + done + fi + + else + CPPFLAGS=$saved_cppflags + LDFLAGS=$saved_ldflags + fi +fi + + + + +# Check whether --with-tcmalloc-lib was given. +if test "${with_tcmalloc_lib+set}" = set; then : + withval=$with_tcmalloc_lib; + with_tcmalloc_lib="$withval" + +else + + with_tcmalloc_lib="tcmalloc" + + +fi + + +has_tcmalloc=0 + +# Check whether --with-tcmalloc was given. +if test "${with_tcmalloc+set}" = set; then : + withval=$with_tcmalloc; + if test "$withval" != "no"; then + if test "x${enable_jemalloc}" = "xyes"; then + as_fn_error $? "Cannot compile with both tcmalloc and jemalloc" "$LINENO" 5 + fi + tcmalloc_have_libs=0 + if test "x$withval" != "xyes" && test "x$withval" != "x"; then + tcmalloc_ldflags="$withval/lib" + + if test "x$LDFLAGS" = "x"; then + test "x$verbose" = "xyes" && echo " setting LDFLAGS to \"-L${tcmalloc_ldflags}\"" + LDFLAGS="-L${tcmalloc_ldflags}" + else + ats_addto_bugger="-L${tcmalloc_ldflags}" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in $LDFLAGS; do + if test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to LDFLAGS" + LDFLAGS="$LDFLAGS $i" + fi + done + fi + + + if test "x$LIBTOOL_LINK_FLAGS" = "x"; then + test "x$verbose" = "xyes" && echo " setting LIBTOOL_LINK_FLAGS to \"-rpath ${tcmalloc_ldflags}\"" + LIBTOOL_LINK_FLAGS="-rpath ${tcmalloc_ldflags}" + else + ats_addto_bugger="-rpath ${tcmalloc_ldflags}" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in $LIBTOOL_LINK_FLAGS; do + if test "x$i" = "x$j"; then + 
ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to LIBTOOL_LINK_FLAGS" + LIBTOOL_LINK_FLAGS="$LIBTOOL_LINK_FLAGS $i" + fi + done + fi + + fi + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing tc_cfree" >&5 +$as_echo_n "checking for library containing tc_cfree... " >&6; } +if ${ac_cv_search_tc_cfree+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_func_search_save_LIBS=$LIBS +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char tc_cfree (); +int +main () +{ +return tc_cfree (); + ; + return 0; +} +_ACEOF +for ac_lib in '' ${with_tcmalloc_lib}; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + if ac_fn_c_try_link "$LINENO"; then : + ac_cv_search_tc_cfree=$ac_res +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext + if ${ac_cv_search_tc_cfree+:} false; then : + break +fi +done +if ${ac_cv_search_tc_cfree+:} false; then : + +else + ac_cv_search_tc_cfree=no +fi +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_tc_cfree" >&5 +$as_echo "$ac_cv_search_tc_cfree" >&6; } +ac_res=$ac_cv_search_tc_cfree +if test "$ac_res" != no; then : + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" + tcmalloc_have_lib=1 +fi + + if test "$tcmalloc_have_lib" != "0"; then + + if test "x$LIBS" = "x"; then + test "x$verbose" = "xyes" && echo " setting LIBS to \"-l${with_tcmalloc_lib}\"" + LIBS="-l${with_tcmalloc_lib}" + else + ats_addto_bugger="-l${with_tcmalloc_lib}" + for i in $ats_addto_bugger; do + ats_addto_duplicate="0" + for j in $LIBS; do + if 
test "x$i" = "x$j"; then + ats_addto_duplicate="1" + break + fi + done + if test $ats_addto_duplicate = "0"; then + test "x$verbose" = "xyes" && echo " adding \"$i\" to LIBS" + LIBS="$LIBS $i" + fi + done + fi + + has_tcmalloc=1 + fi + fi + +fi + + + + +# +# Check for libreadline/libedit + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for a readline compatible library" >&5 +$as_echo_n "checking for a readline compatible library... " >&6; } +if ${ax_cv_lib_readline+:} false; then : + $as_echo_n "(cached) " >&6 +else + + ORIG_LIBS="$LIBS" + for readline_lib in edit editline; do + for termcap_lib in "" termcap curses ncurses; do + if test -z "$termcap_lib"; then + TRY_LIB="-l$readline_lib" + else + TRY_LIB="-l$readline_lib -l$termcap_lib" + fi + LIBS="$ORIG_LIBS $TRY_LIB" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char readline (); +int +main () +{ +return readline (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ax_cv_lib_readline="$TRY_LIB" +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + if test -n "$ax_cv_lib_readline"; then + break + fi + done + if test -n "$ax_cv_lib_readline"; then + break + fi + done + if test -z "$ax_cv_lib_readline"; then + ax_cv_lib_readline="no" + fi + LIBS="$ORIG_LIBS" + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_lib_readline" >&5 +$as_echo "$ax_cv_lib_readline" >&6; } + + if test "$ax_cv_lib_readline" != "no"; then + ORIG_LIBS="$LIBS" + LIBS="$LIBS $ax_cv_lib_readline" + LIBREADLINE=$ax_cv_lib_readline + + +$as_echo "#define HAVE_LIBREADLINE 1" >>confdefs.h + + for ac_header in readline.h readline/readline.h +do : + as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` +ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default" +if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 +_ACEOF + +fi + +done + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether readline supports history" >&5 +$as_echo_n "checking whether readline supports history... " >&6; } +if ${ax_cv_lib_readline_history+:} false; then : + $as_echo_n "(cached) " >&6 +else + + ax_cv_lib_readline_history="no" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char add_history (); +int +main () +{ +return add_history (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ax_cv_lib_readline_history="yes" +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_lib_readline_history" >&5 +$as_echo "$ax_cv_lib_readline_history" >&6; } + if test "$ax_cv_lib_readline_history" = "yes"; then + +$as_echo "#define HAVE_READLINE_HISTORY 1" >>confdefs.h + + for ac_header in history.h readline/history.h +do : + as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` +ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default" +if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 +_ACEOF + +fi + +done + + fi + LIBS="$ORIG_LIBS" + fi + + +# On Darwin LuaJIT requires magic link options, otherwise it will crash in luaL_openlibs() at startup. See +# http://luajit.org/install.html. +case $host_os_def in + darwin) + TS_LUAJIT_LDFLAGS="-Wl,-pagezero_size,10000 -Wl,-image_base,100000000" + ;; +esac + + + + +# We should be able to build http_load if epoll(2) is available. + if test x"$ac_cv_func_epoll_ctl" = x"yes"; then + BUILD_HTTP_LOAD_TRUE= + BUILD_HTTP_LOAD_FALSE='#' +else + BUILD_HTTP_LOAD_TRUE='#' + BUILD_HTTP_LOAD_FALSE= +fi + + +# Check for spdylay library +if test "x${enable_spdy}" = "xyes"; then + +pkg_failed=no +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for SPDYLAY" >&5 +$as_echo_n "checking for SPDYLAY... " >&6; } + +if test -n "$SPDYLAY_CFLAGS"; then + pkg_cv_SPDYLAY_CFLAGS="$SPDYLAY_CFLAGS" + elif test -n "$PKG_CONFIG"; then + if test -n "$PKG_CONFIG" && \ + { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libspdylay\""; } >&5 + ($PKG_CONFIG --exists --print-errors "libspdylay") 2>&5 + ac_status=$? 
+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + pkg_cv_SPDYLAY_CFLAGS=`$PKG_CONFIG --cflags "libspdylay" 2>/dev/null` + test "x$?" != "x0" && pkg_failed=yes +else + pkg_failed=yes +fi + else + pkg_failed=untried +fi +if test -n "$SPDYLAY_LIBS"; then + pkg_cv_SPDYLAY_LIBS="$SPDYLAY_LIBS" + elif test -n "$PKG_CONFIG"; then + if test -n "$PKG_CONFIG" && \ + { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libspdylay\""; } >&5 + ($PKG_CONFIG --exists --print-errors "libspdylay") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + pkg_cv_SPDYLAY_LIBS=`$PKG_CONFIG --libs "libspdylay" 2>/dev/null` + test "x$?" != "x0" && pkg_failed=yes +else + pkg_failed=yes +fi + else + pkg_failed=untried +fi + + + +if test $pkg_failed = yes; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + +if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then + _pkg_short_errors_supported=yes +else + _pkg_short_errors_supported=no +fi + if test $_pkg_short_errors_supported = yes; then + SPDYLAY_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "libspdylay" 2>&1` + else + SPDYLAY_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "libspdylay" 2>&1` + fi + # Put the nasty error message in config.log where it belongs + echo "$SPDYLAY_PKG_ERRORS" >&5 + + as_fn_error $? "Package requirements (libspdylay) were not met: + +$SPDYLAY_PKG_ERRORS + +Consider adjusting the PKG_CONFIG_PATH environment variable if you +installed software in a non-standard prefix. + +Alternatively, you may set the environment variables SPDYLAY_CFLAGS +and SPDYLAY_LIBS to avoid the need to call pkg-config. +See the pkg-config man page for more details." 
"$LINENO" 5 +elif test $pkg_failed = untried; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "The pkg-config script could not be found or is too old. Make sure it +is in your PATH or set the PKG_CONFIG environment variable to the full +path to pkg-config. + +Alternatively, you may set the environment variables SPDYLAY_CFLAGS +and SPDYLAY_LIBS to avoid the need to call pkg-config. +See the pkg-config man page for more details. + +To get pkg-config, see . +See \`config.log' for more details" "$LINENO" 5; } +else + SPDYLAY_CFLAGS=$pkg_cv_SPDYLAY_CFLAGS + SPDYLAY_LIBS=$pkg_cv_SPDYLAY_LIBS + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + +fi +fi + +# ----------------------------------------------------------------------------- +# 5. CHECK FOR HEADER FILES + + +for ac_header in sys/types.h sys/mman.h +do : + as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` +ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default" +if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 +_ACEOF + +fi + +done + +for tsc_i in sys/types.h sys/mman.h +do + ac_safe=`echo "$tsc_i" | sed 'y%./+-%__p_%'` + tsc_2=`echo "$tsc_i" | sed -e 's%/%_%g' -e 's/\.//g' -e 's/-//g'` + if eval "test \"`echo '$ac_cv_header_'$ac_safe`\" = yes"; then + eval "$tsc_2=1" + else + eval "$tsc_2=0" + fi +done + + +for ac_header in sys/epoll.h \ + sys/event.h \ + sys/param.h \ + sys/pset.h \ + sched.h \ + pthread.h \ + pthread_np.h \ + machine/endian.h \ + endian.h \ + sys/sysinfo.h \ + sys/systeminfo.h \ + netinet/in.h \ + netinet/in_systm.h \ + netinet/tcp.h \ + sys/ioctl.h \ + sys/byteorder.h \ + sys/sockio.h \ + sys/prctl.h \ + arpa/nameser.h \ + arpa/nameser_compat.h \ + execinfo.h \ 
+ netdb.h \ + ctype.h \ + siginfo.h \ + malloc.h \ + wait.h \ + float.h \ + libgen.h \ + values.h \ + alloca.h \ + cpio.h \ + stropts.h \ + sys/param.h \ + sys/sysmacros.h \ + math.h \ + stdint.h \ + stdbool.h \ + net/ppp_defs.h \ + ifaddrs.h\ + readline/readline.h \ + editline/readline.h +do : + as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` +ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default" +if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 +_ACEOF + +fi + +done + +for tsc_i in sys/epoll.h \ + sys/event.h \ + sys/param.h \ + sys/pset.h \ + sched.h \ + pthread.h \ + pthread_np.h \ + machine/endian.h \ + endian.h \ + sys/sysinfo.h \ + sys/systeminfo.h \ + netinet/in.h \ + netinet/in_systm.h \ + netinet/tcp.h \ + sys/ioctl.h \ + sys/byteorder.h \ + sys/sockio.h \ + sys/prctl.h \ + arpa/nameser.h \ + arpa/nameser_compat.h \ + execinfo.h \ + netdb.h \ + ctype.h \ + siginfo.h \ + malloc.h \ + wait.h \ + float.h \ + libgen.h \ + values.h \ + alloca.h \ + cpio.h \ + stropts.h \ + sys/param.h \ + sys/sysmacros.h \ + math.h \ + stdint.h \ + stdbool.h \ + net/ppp_defs.h \ + ifaddrs.h\ + readline/readline.h \ + editline/readline.h +do + ac_safe=`echo "$tsc_i" | sed 'y%./+-%__p_%'` + tsc_2=`echo "$tsc_i" | sed -e 's%/%_%g' -e 's/\.//g' -e 's/-//g'` + if eval "test \"`echo '$ac_cv_header_'$ac_safe`\" = yes"; then + eval "$tsc_2=1" + else + eval "$tsc_2=0" + fi +done + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +for ac_header in sys/statfs.h sys/statvfs.h sys/disk.h sys/disklabel.h +do : + as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` +ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default" +if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 +_ACEOF + +fi + +done + 
+for tsc_i in sys/statfs.h sys/statvfs.h sys/disk.h sys/disklabel.h +do + ac_safe=`echo "$tsc_i" | sed 'y%./+-%__p_%'` + tsc_2=`echo "$tsc_i" | sed -e 's%/%_%g' -e 's/\.//g' -e 's/-//g'` + if eval "test \"`echo '$ac_cv_header_'$ac_safe`\" = yes"; then + eval "$tsc_2=1" + else + eval "$tsc_2=0" + fi +done + + + +for ac_header in sys/sysctl.h +do : + ac_fn_c_check_header_compile "$LINENO" "sys/sysctl.h" "ac_cv_header_sys_sysctl_h" "#ifdef HAVE_SYS_PARAM_H + #include + #endif + +" +if test "x$ac_cv_header_sys_sysctl_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_SYS_SYSCTL_H 1 +_ACEOF + +fi + +done + +for tsc_i in sys/sysctl.h +do + ac_safe=`echo "$tsc_i" | sed 'y%./+-%__p_%'` + tsc_2=`echo "$tsc_i" | sed -e 's%/%_%g' -e 's/\.//g' -e 's/-//g'` + if eval "test \"`echo '$ac_cv_header_'$ac_safe`\" = yes"; then + eval "$tsc_2=1" + else + eval "$tsc_2=0" + fi +done + + + +for ac_header in sys/cpuset.h +do : + ac_fn_c_check_header_compile "$LINENO" "sys/cpuset.h" "ac_cv_header_sys_cpuset_h" "#ifdef HAVE_SYS_PARAM_H + #include + #endif + +" +if test "x$ac_cv_header_sys_cpuset_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_SYS_CPUSET_H 1 +_ACEOF + +fi + +done + +for tsc_i in sys/cpuset.h +do + ac_safe=`echo "$tsc_i" | sed 'y%./+-%__p_%'` + tsc_2=`echo "$tsc_i" | sed -e 's%/%_%g' -e 's/\.//g' -e 's/-//g'` + if eval "test \"`echo '$ac_cv_header_'$ac_safe`\" = yes"; then + eval "$tsc_2=1" + else + eval "$tsc_2=0" + fi +done + + + +for ac_header in sys/mount.h +do : + ac_fn_c_check_header_compile "$LINENO" "sys/mount.h" "ac_cv_header_sys_mount_h" "#ifdef HAVE_SYS_PARAM_H + #include + #endif + +" +if test "x$ac_cv_header_sys_mount_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_SYS_MOUNT_H 1 +_ACEOF + +fi + +done + +for tsc_i in sys/mount.h +do + ac_safe=`echo "$tsc_i" | sed 'y%./+-%__p_%'` + tsc_2=`echo "$tsc_i" | sed -e 's%/%_%g' -e 's/\.//g' -e 's/-//g'` + if eval "test \"`echo '$ac_cv_header_'$ac_safe`\" = yes"; then + eval "$tsc_2=1" + 
else + eval "$tsc_2=0" + fi +done + + + +for ac_header in arpa/inet.h +do : + ac_fn_c_check_header_compile "$LINENO" "arpa/inet.h" "ac_cv_header_arpa_inet_h" "#ifdef HAVE_SYS_TYPES_H + #include + #endif + #ifdef HAVE_NETINET_IN_H + #include + #endif + +" +if test "x$ac_cv_header_arpa_inet_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_ARPA_INET_H 1 +_ACEOF + +fi + +done + +for tsc_i in arpa/inet.h +do + ac_safe=`echo "$tsc_i" | sed 'y%./+-%__p_%'` + tsc_2=`echo "$tsc_i" | sed -e 's%/%_%g' -e 's/\.//g' -e 's/-//g'` + if eval "test \"`echo '$ac_cv_header_'$ac_safe`\" = yes"; then + eval "$tsc_2=1" + else + eval "$tsc_2=0" + fi +done + + + +for ac_header in netinet/ip.h +do : + ac_fn_c_check_header_compile "$LINENO" "netinet/ip.h" "ac_cv_header_netinet_ip_h" "#ifdef HAVE_SYS_TYPES_H + #include + #endif + #ifdef HAVE_NETINET_IN_H + #include + #endif + #ifdef HAVE_NETINET_IN_SYSTM_H + #include + #endif + +" +if test "x$ac_cv_header_netinet_ip_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_NETINET_IP_H 1 +_ACEOF + +fi + +done + +for tsc_i in netinet/ip.h +do + ac_safe=`echo "$tsc_i" | sed 'y%./+-%__p_%'` + tsc_2=`echo "$tsc_i" | sed -e 's%/%_%g' -e 's/\.//g' -e 's/-//g'` + if eval "test \"`echo '$ac_cv_header_'$ac_safe`\" = yes"; then + eval "$tsc_2=1" + else + eval "$tsc_2=0" + fi +done + + + +for ac_header in netinet/ip_icmp.h +do : + ac_fn_c_check_header_compile "$LINENO" "netinet/ip_icmp.h" "ac_cv_header_netinet_ip_icmp_h" "#ifdef HAVE_SYS_TYPES_H + #include + #endif + #ifdef HAVE_NETINET_IN_H + #include + #endif + #ifdef HAVE_NETINET_IP_H + #include + #endif + #ifdef HAVE_NETINET_IN_SYSTM_H + #include + #endif + +" +if test "x$ac_cv_header_netinet_ip_icmp_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_NETINET_IP_ICMP_H 1 +_ACEOF + +fi + +done + +for tsc_i in netinet/ip_icmp.h +do + ac_safe=`echo "$tsc_i" | sed 'y%./+-%__p_%'` + tsc_2=`echo "$tsc_i" | sed -e 's%/%_%g' -e 's/\.//g' -e 's/-//g'` + if eval "test \"`echo 
'$ac_cv_header_'$ac_safe`\" = yes"; then + eval "$tsc_2=1" + else + eval "$tsc_2=0" + fi +done + + + + + +# Test for additional pthread interfaces. + +# Darwin pthread_setname_np: +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for 1-parameter version of pthread_setname_np()" >&5 +$as_echo_n "checking for 1-parameter version of pthread_setname_np()... " >&6; } +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +#if HAVE_PTHREAD_H +#include +#endif +#if PTHREAD_NP_H +#include +#endif + +int +main () +{ + + pthread_setname_np("conftest"); + + ; + return 0; +} + +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + + +$as_echo "#define HAVE_PTHREAD_SETNAME_NP_1 1" >>confdefs.h + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + +else + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + +# Linux pthread_setname_np: +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for 2-parameter version of pthread_setname_np()" >&5 +$as_echo_n "checking for 2-parameter version of pthread_setname_np()... " >&6; } +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + + +#if HAVE_PTHREAD_H +#include +#endif +#if PTHREAD_NP_H +#include +#endif + +int +main () +{ + + pthread_setname_np(pthread_self(), "conftest"); + + ; + return 0; +} + +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + + +$as_echo "#define HAVE_PTHREAD_SETNAME_NP_2 1" >>confdefs.h + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + +else + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + +# BSD pthread_set_name_np: +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for 2-parameter version of pthread_set_name_np()" >&5 +$as_echo_n "checking for 2-parameter version of pthread_set_name_np()... " >&6; } +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +#if HAVE_PTHREAD_H +#include +#endif +#if PTHREAD_NP_H +#include +#endif + +int +main () +{ + + pthread_set_name_np(pthread_self(), "conftest"); + + ; + return 0; +} + +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + + +$as_echo "#define HAVE_PTHREAD_SET_NAME_NP_2 1" >>confdefs.h + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + +else + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + +# BSD-derived systems populate the socket length in the structure itself. It's +# redundant to check all of these, but hey, I need the typing practice. 
+ac_fn_c_check_member "$LINENO" "struct sockaddr" "sa_len" "ac_cv_member_struct_sockaddr_sa_len" "#include +" +if test "x$ac_cv_member_struct_sockaddr_sa_len" = xyes; then : + +fi + +ac_fn_c_check_member "$LINENO" "struct sockaddr_in" "sin_len" "ac_cv_member_struct_sockaddr_in_sin_len" "#include +" +if test "x$ac_cv_member_struct_sockaddr_in_sin_len" = xyes; then : + +fi + +ac_fn_c_check_member "$LINENO" "struct sockaddr_in6" "sin6_len" "ac_cv_member_struct_sockaddr_in6_sin6_len" "#include +" +if test "x$ac_cv_member_struct_sockaddr_in6_sin6_len" = xyes; then : + +fi + + +if test "x${ac_cv_member_struct_sockaddr_sa_len}" = xyes; then + +$as_echo "#define HAVE_STRUCT_SOCKADDR_SA_LEN 1" >>confdefs.h + +fi + +if test "x${ac_cv_member_struct_sockaddr_in_sin_len}" = xyes ; then + +$as_echo "#define HAVE_STRUCT_SOCKADDR_IN_SIN_LEN 1" >>confdefs.h + +fi + +if test "x${ac_cv_member_struct_sockaddr_in6_sin6_len}" = xyes; then + +$as_echo "#define HAVE_STRUCT_SOCKADDR_IN6_SIN6_LEN 1" >>confdefs.h + +fi + +if test "x${with_profiler}" = "xyes"; then + +for ac_header in google/profiler.h \ + +do : + as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` +ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default" +if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 +_ACEOF + +fi + +done + +for tsc_i in google/profiler.h \ + +do + ac_safe=`echo "$tsc_i" | sed 'y%./+-%__p_%'` + tsc_2=`echo "$tsc_i" | sed -e 's%/%_%g' -e 's/\.//g' -e 's/-//g'` + if eval "test \"`echo '$ac_cv_header_'$ac_safe`\" = yes"; then + eval "$tsc_2=1" + else + eval "$tsc_2=0" + fi +done + +fi + +if test "x${enable_posix_cap}" != "xno"; then + for ac_header in sys/capability.h +do : + ac_fn_c_check_header_compile "$LINENO" "sys/capability.h" "ac_cv_header_sys_capability_h" " + +" +if test "x$ac_cv_header_sys_capability_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define 
HAVE_SYS_CAPABILITY_H 1 +_ACEOF + +else + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "Found POSIX capabilities library but not the header sys/capability.h. POSIX capabilities are not a required feature, you can disable then with --disable-posix-cap +See \`config.log' for more details" "$LINENO" 5; } +fi + +done + +fi + +# Check for high-resolution timestamps in struct stat +ac_fn_c_check_member "$LINENO" "struct stat" "st_mtimespec.tv_nsec" "ac_cv_member_struct_stat_st_mtimespec_tv_nsec" "$ac_includes_default" +if test "x$ac_cv_member_struct_stat_st_mtimespec_tv_nsec" = xyes; then : + +cat >>confdefs.h <<_ACEOF +#define HAVE_STRUCT_STAT_ST_MTIMESPEC_TV_NSEC 1 +_ACEOF + + +fi + +ac_fn_c_check_member "$LINENO" "struct stat" "st_mtim.tv_nsec" "ac_cv_member_struct_stat_st_mtim_tv_nsec" "$ac_includes_default" +if test "x$ac_cv_member_struct_stat_st_mtim_tv_nsec" = xyes; then : + +cat >>confdefs.h <<_ACEOF +#define HAVE_STRUCT_STAT_ST_MTIM_TV_NSEC 1 +_ACEOF + + +fi + + + +# +# Configure sockopt value for TPROXY. Look at the enable flag. +# Value 'no' means user forced disable, don't check anything else. +# 'auto' means user didn't say, so silently enable/disable +# based on success. +# A numeric value means enable, don't check, use that value. +# Anything else means user forced, fail if value not found +# in header file. +# We can't just include linux/in.h because it's incompatible with +# netinet/in.h. +# Verify the file exists (is readable), scan for the value we need, +# if found export the value and enable use of the value. +# +ip_transparent=0 +use_tproxy=0 +tproxy_header=/usr/include/linux/in.h +tproxy_usage_enable=" + --enable-tproxy Enable the feature and validate." +tproxy_usage_default=" + --enable-tproxy=force Enable using default sockopt value, no validation." 
+tproxy_usage_numeric=" + --enable-tproxy=X where X is numeric + Enable, use X for sockopt value, no validation." +tproxy_usage_disable=" + --disable-tproxy Disable feature, no validation." +proxy_usage="$tproxy_usage_enable$tproxy_usage_default$tproxy_usage_numeric$tproxy_usage_disable" + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to enable transparent proxy" >&5 +$as_echo_n "checking whether to enable transparent proxy... " >&6; } +if test "x$enable_tproxy" != "xno"; then : + + if test "$use_posix_cap" -eq 0; then : + + if test "x$enable_tproxy" = xauto; then : + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + +else + + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "TPROXY feature requires POSIX capabilities. +See \`config.log' for more details" "$LINENO" 5; } + +fi + +else + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for TPROXY sockopt IP_TRANSPARENT" >&5 +$as_echo_n "checking for TPROXY sockopt IP_TRANSPARENT... 
" >&6; } + case "$enable_tproxy" in + [0-9][0-9]*) + ip_transparent=$enable_tproxy + use_tproxy=1 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: forced to $ip_transparent" >&5 +$as_echo "forced to $ip_transparent" >&6; } + ;; + force) + ip_transparent=19 + use_tproxy=1 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: forced to $ip_transparent" >&5 +$as_echo "forced to $ip_transparent" >&6; } + ;; + yes|auto) + if test -r $tproxy_header; then : + + ip_transparent=`$AWK "/^#define[ \t]+IP_TRANSPARENT[ \t]+[0-9]+/{print \\$3}" $tproxy_header` + if test "x$ip_transparent" != "x"; then : + + use_tproxy=1 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: set to $ip_transparent" >&5 +$as_echo "set to $ip_transparent" >&6; } + +else + + ip_transparent=0 + if test "x$enable_tproxy" = xauto; then : + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + +else + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: failed" >&5 +$as_echo "failed" >&6; } + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "tproxy feature enabled but the sockopt value was not found in $tproxy_header. Try one of$tproxy_usage_default$tproxy_usage_numeric$tproxy_usage_disable +See \`config.log' for more details" "$LINENO" 5; } + +fi + +fi + +else + + if test "x$enable_tproxy" = xauto; then : + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + +else + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: failed" >&5 +$as_echo "failed" >&6; } + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "tproxy feature enabled but the header file $tproxy_header was not readable. 
Try one of$tproxy_usage_default$tproxy_usage_numeric$tproxy_usage_disable +See \`config.log' for more details" "$LINENO" 5; } + +fi + +fi + ;; + *) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: failed" >&5 +$as_echo "failed" >&6; } + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "Invalid argument to feature tproxy.$tproxy_usage +See \`config.log' for more details" "$LINENO" 5; } + ;; + esac + +fi + +fi + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for SO_MARK" >&5 +$as_echo_n "checking for SO_MARK... " >&6; } +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +#include + +int +main () +{ + +if( SO_MARK > 0) return 0; +else return 1; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + has_so_mark=1 +msg=yes +else + +has_so_mark=0 +msg=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $msg" >&5 +$as_echo "$msg" >&6; } + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for IP_TOS" >&5 +$as_echo_n "checking for IP_TOS... " >&6; } +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +#include +#include +#include + +int +main () +{ + +if( IP_TOS > 0) return 0; +else return 1; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + has_ip_tos=1 +msg=yes +else + +has_ip_tos=0 +msg=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $msg" >&5 +$as_echo "$msg" >&6; } + + + +default_loopback_iface="" +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for loopback network interface" >&5 +$as_echo_n "checking for loopback network interface... 
" >&6; } +case $host_os in + linux*) + default_loopback_iface=lo + ;; +darwin* | freebsd* | solaris*) + default_loopback_iface=lo0 + ;; +esac +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $default_loopback_iface" >&5 +$as_echo "$default_loopback_iface" >&6; } + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking style of gethostbyname_r routine" >&5 +$as_echo_n "checking style of gethostbyname_r routine... " >&6; } +if ${ac_cv_gethostbyname_r_style+:} false; then : + $as_echo_n "(cached) " >&6 +else + ats_save_CFLAGS=$CFLAGS + CFLAGS="$CFLAGS $CFLAGS_WARN" + if test "$ac_cv_c_compiler_gnu" = "yes"; then + CFLAGS="$CFLAGS -Werror" + fi + CFLAGS=$(echo $CFLAGS | sed -e 's/^-w$//' -e 's/^-w //' -e 's/ -w$//' -e 's/ -w / /') + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +#ifdef HAVE_SYS_TYPES_H +#include +#endif +#ifdef HAVE_NETINET_IN_H +#include +#endif +#ifdef HAVE_ARPA_INET_H +#include +#endif +#ifdef HAVE_NETDB_H +#include +#endif +#ifdef HAVE_STDLIB_H +#include +#endif + +int +main () +{ + +int tmp = gethostbyname_r((const char *) 0, (struct hostent *) 0, + (char *) 0, 0, (struct hostent **) 0, &tmp); +/* use tmp to suppress the warning */ +tmp=0; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_gethostbyname_r_style=glibc2 +else + ac_cv_gethostbyname_r_style=none +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + CFLAGS=$ats_save_CFLAGS + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_gethostbyname_r_style" >&5 +$as_echo "$ac_cv_gethostbyname_r_style" >&6; } + +if test "$ac_cv_gethostbyname_r_style" = "glibc2"; then + gethostbyname_r_glibc2=1 + +$as_echo "#define GETHOSTBYNAME_R_GLIBC2 1" >>confdefs.h + +else + gethostbyname_r_glibc2=0 +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking 3rd argument to the gethostbyname_r routines" >&5 +$as_echo_n "checking 3rd argument to the gethostbyname_r routines... 
" >&6; } +if ${ac_cv_gethostbyname_r_arg+:} false; then : + $as_echo_n "(cached) " >&6 +else + ats_save_CFLAGS=$CFLAGS + CFLAGS="$CFLAGS $CFLAGS_WARN" + if test "$ac_cv_c_compiler_gnu" = "yes"; then + CFLAGS="$CFLAGS -Werror" + fi + CFLAGS=$(echo $CFLAGS | sed -e 's/^-w$//' -e 's/^-w //' -e 's/ -w$//' -e 's/ -w / /') + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +#ifdef HAVE_SYS_TYPES_H +#include +#endif +#ifdef HAVE_NETINET_IN_H +#include +#endif +#ifdef HAVE_ARPA_INET_H +#include +#endif +#ifdef HAVE_NETDB_H +#include +#endif +#ifdef HAVE_STDLIB_H +#include +#endif + +int +main () +{ + +int tmp = gethostbyname_r((const char *) 0, (struct hostent *) 0, + (struct hostent_data *) 0); +/* use tmp to suppress the warning */ +tmp=0; + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_gethostbyname_r_arg=hostent_data +else + ac_cv_gethostbyname_r_arg=char +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + CFLAGS=$ats_save_CFLAGS + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_gethostbyname_r_arg" >&5 +$as_echo "$ac_cv_gethostbyname_r_arg" >&6; } + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking IN6_IS_ADDR_UNSPECIFIED macro works" >&5 +$as_echo_n "checking IN6_IS_ADDR_UNSPECIFIED macro works... " >&6; } +if ${ac_cv_macro_in6_is_addr_unspecified+:} false; then : + $as_echo_n "(cached) " >&6 +else + ats_save_CFLAGS=$CFLAGS + CFLAGS="$CFLAGS $CFLAGS_WARN" + if test "$ac_cv_c_compiler_gnu" = "yes"; then + CFLAGS="$CFLAGS -Werror" + fi + CFLAGS=$(echo $CFLAGS | sed -e 's/^-w$//' -e 's/^-w //' -e 's/ -w$//' -e 's/ -w / /') + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + +#include + +int +main () +{ + + (void) IN6_IS_ADDR_UNSPECIFIED(0); + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_macro_in6_is_addr_unspecified=yes +else + ac_cv_macro_in6_is_addr_unspecified=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + CFLAGS=$ats_save_CFLAGS + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_macro_in6_is_addr_unspecified" >&5 +$as_echo "$ac_cv_macro_in6_is_addr_unspecified" >&6; } + +has_in6_is_addr_unspecified=1 +if test "x$ac_cv_macro_in6_is_addr_unspecified" = "xno"; then + has_in6_is_addr_unspecified=0 +fi + + + +ac_fn_c_check_type "$LINENO" "struct tcp_info" "ac_cv_type_struct_tcp_info" " + #include + #include + + +" +if test "x$ac_cv_type_struct_tcp_info" = xyes; then : + +$as_echo "#define HAVE_STRUCT_TCP_INFO 1" >>confdefs.h + +fi + + +# See if we can build the remap_stats plugin +if test "x$enable_experimental_plugins" = xyes; then : + + for ac_header in search.h +do : + ac_fn_c_check_header_mongrel "$LINENO" "search.h" "ac_cv_header_search_h" "$ac_includes_default" +if test "x$ac_cv_header_search_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_SEARCH_H 1 +_ACEOF + +fi + +done + + if test "x$ac_cv_header_search_h" = "xyes"; then : + + ac_fn_c_check_type "$LINENO" "struct hsearch_data" "ac_cv_type_struct_hsearch_data" "#include +" +if test "x$ac_cv_type_struct_hsearch_data" = xyes; then : + +fi + + for ac_func in hcreate_r hsearch_r +do : + as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` +ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" +if eval test \"x\$"$as_ac_var"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 +_ACEOF + +fi +done + + +fi + +fi + if test "x$enable_experimental_plugins" = "xyes" -a "x$ac_cv_header_search_h" = "xyes" -a "x$ac_cv_type_struct_hsearch_data" = "xyes" -a "x$ac_cv_func_hcreate_r" = "xyes" -a "x$ac_cv_func_hsearch_r" = "xyes" ; then + 
BUILD_REMAP_STATS_PLUGIN_TRUE= + BUILD_REMAP_STATS_PLUGIN_FALSE='#' +else + BUILD_REMAP_STATS_PLUGIN_TRUE='#' + BUILD_REMAP_STATS_PLUGIN_FALSE= +fi + + + if test "x$enable_experimental_plugins" = "xyes" -a "x${enable_collapsed_connection_plugin}" != "xno" ; then + BUILD_COLLAPSED_CONNECTION_PLUGIN_TRUE= + BUILD_COLLAPSED_CONNECTION_PLUGIN_FALSE='#' +else + BUILD_COLLAPSED_CONNECTION_PLUGIN_TRUE='#' + BUILD_COLLAPSED_CONNECTION_PLUGIN_FALSE= +fi + + +# +# use modular IOCORE +# +iocore_include_dirs="-I\$(top_srcdir)/iocore/eventsystem \ +-I\$(top_srcdir)/iocore/net \ +-I\$(top_srcdir)/iocore/aio \ +-I\$(top_srcdir)/iocore/hostdb \ +-I\$(top_srcdir)/iocore/cache \ +-I\$(top_srcdir)/iocore/cluster \ +-I\$(top_srcdir)/iocore/utils \ +-I\$(top_srcdir)/iocore/dns" + +# Testing Framework suffix generation hack +TFW_PACKAGE_SUFFIX=$os_type + +MGMT_DEFS="$MGMT_DEFS -DMGMT_USE_SYSLOG" + + + + + + + + + + + + + + + + + + + +# ----------------------------------------------------------------------------- +# 6. 
OUTPUT FILES + +ac_config_files="$ac_config_files Makefile cmd/Makefile cmd/traffic_cop/Makefile cmd/traffic_line/Makefile cmd/traffic_top/Makefile doc/Doxyfile doc/Makefile example/Makefile iocore/Makefile iocore/aio/Makefile iocore/cache/Makefile iocore/cluster/Makefile iocore/dns/Makefile iocore/eventsystem/Makefile iocore/hostdb/Makefile iocore/net/Makefile iocore/utils/Makefile lib/Makefile lib/perl/Makefile lib/perl/lib/Apache/TS.pm lib/records/Makefile lib/ts/Makefile lib/ts/apidefs.h lib/ts/ink_config.h lib/tsconfig/Makefile lib/wccp/Makefile mgmt/Makefile mgmt/api/Makefile mgmt/api/include/Makefile mgmt/cluster/Makefile mgmt/stats/Makefile mgmt/utils/Makefile mgmt/web2/Makefile plugins/Makefile plugins/cacheurl/Makefile plugins/conf_remap/Makefile plugins/gzip/Makefile plugins/header_rewrite/Makefile plugins/libloader/Makefile plugins/regex_remap/Makefile plugins/stats_over_http/Makefile plugins/tcpinfo/Makefile proxy/Makefile proxy/api/ts/Makefile proxy/config/Makefile proxy/config/body_factory/Makefile proxy/config/body_factory/default/Makefile proxy/config/records.config.default proxy/config/storage.config.default proxy/congest/Makefile proxy/hdrs/Makefile proxy/http/Makefile proxy/http/remap/Makefile proxy/logging/Makefile proxy/shared/Makefile proxy/spdy/Makefile rc/Makefile rc/trafficserver rc/trafficserver.conf rc/trafficserver.service rc/trafficserver.xml tools/Makefile tools/tsxs" + + +if test "x$enable_experimental_plugins" = xyes; then : + + ac_config_files="$ac_config_files plugins/experimental/Makefile plugins/experimental/authproxy/Makefile plugins/experimental/background_fetch/Makefile plugins/experimental/balancer/Makefile plugins/experimental/buffer_upload/Makefile plugins/experimental/channel_stats/Makefile plugins/experimental/collapsed_connection/Makefile plugins/experimental/custom_redirect/Makefile plugins/experimental/escalate/Makefile plugins/experimental/esi/Makefile plugins/experimental/geoip_acl/Makefile 
plugins/experimental/healthchecks/Makefile plugins/experimental/hipes/Makefile plugins/experimental/metalink/Makefile plugins/experimental/remap_stats/Makefile plugins/experimental/regex_revalidate/Makefile plugins/experimental/s3_auth/Makefile plugins/experimental/stale_while_revalidate/Makefile plugins/experimental/ts_lua/Makefile plugins/experimental/url_sig/Makefile plugins/experimental/xdebug/Makefile" + +fi + +if test "x$enable_cppapi" = xyes; then : + + ac_config_files="$ac_config_files lib/atscppapi/Makefile lib/atscppapi/examples/Makefile lib/atscppapi/examples/async_http_fetch/Makefile lib/atscppapi/examples/async_timer/Makefile lib/atscppapi/examples/clientredirect/Makefile lib/atscppapi/examples/clientrequest/Makefile lib/atscppapi/examples/customresponse/Makefile lib/atscppapi/examples/boom/Makefile lib/atscppapi/examples/globalhook/Makefile lib/atscppapi/examples/gzip_transformation/Makefile lib/atscppapi/examples/helloworld/Makefile lib/atscppapi/examples/intercept/Makefile lib/atscppapi/examples/internal_transaction_handling/Makefile lib/atscppapi/examples/logger_example/Makefile lib/atscppapi/examples/multiple_transaction_hooks/Makefile lib/atscppapi/examples/null_transformation_plugin/Makefile lib/atscppapi/examples/post_buffer/Makefile lib/atscppapi/examples/remap_plugin/Makefile lib/atscppapi/examples/serverresponse/Makefile lib/atscppapi/examples/stat_example/Makefile lib/atscppapi/examples/timeout_example/Makefile lib/atscppapi/examples/transactionhook/Makefile lib/atscppapi/src/Makefile" + +fi + + +# ----------------------------------------------------------------------------- +# 7. autoheader TEMPLATES + +cat >confcache <<\_ACEOF +# This file is a shell script that caches the results of configure +# tests run on this system so they can be shared between configure +# scripts and configure runs, see configure's option --config-cache. +# It is not useful on other systems. 
If it contains results you don't +# want to keep, you may remove or edit it. +# +# config.status only pays attention to the cache file if you give it +# the --recheck option to rerun configure. +# +# `ac_cv_env_foo' variables (set or unset) will be overridden when +# loading this file, other *unset* `ac_cv_foo' will be assigned the +# following values. + +_ACEOF + +# The following way of writing the cache mishandles newlines in values, +# but we know of no workaround that is simple, portable, and efficient. +# So, we kill variables containing newlines. +# Ultrix sh set writes to stderr and can't be redirected directly, +# and sets the high bit in the cache file unless we assign to the vars. +( + for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do + eval ac_val=\$$ac_var + case $ac_val in #( + *${as_nl}*) + case $ac_var in #( + *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 +$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; + esac + case $ac_var in #( + _ | IFS | as_nl) ;; #( + BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( + *) { eval $ac_var=; unset $ac_var;} ;; + esac ;; + esac + done + + (set) 2>&1 | + case $as_nl`(ac_space=' '; set) 2>&1` in #( + *${as_nl}ac_space=\ *) + # `set' does not quote correctly, so add quotes: double-quote + # substitution turns \\\\ into \\, and sed turns \\ into \. + sed -n \ + "s/'/'\\\\''/g; + s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p" + ;; #( + *) + # `set' quotes correctly as required by POSIX, so do not add quotes. 
+ sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" + ;; + esac | + sort +) | + sed ' + /^ac_cv_env_/b end + t clear + :clear + s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/ + t end + s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/ + :end' >>confcache +if diff "$cache_file" confcache >/dev/null 2>&1; then :; else + if test -w "$cache_file"; then + if test "x$cache_file" != "x/dev/null"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5 +$as_echo "$as_me: updating cache $cache_file" >&6;} + if test ! -f "$cache_file" || test -h "$cache_file"; then + cat confcache >"$cache_file" + else + case $cache_file in #( + */* | ?:*) + mv -f confcache "$cache_file"$$ && + mv -f "$cache_file"$$ "$cache_file" ;; #( + *) + mv -f confcache "$cache_file" ;; + esac + fi + fi + else + { $as_echo "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5 +$as_echo "$as_me: not updating unwritable cache $cache_file" >&6;} + fi +fi +rm -f confcache + +test "x$prefix" = xNONE && prefix=$ac_default_prefix +# Let make expand exec_prefix. +test "x$exec_prefix" = xNONE && exec_prefix='${prefix}' + +DEFS=-DHAVE_CONFIG_H + +ac_libobjs= +ac_ltlibobjs= +U= +for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue + # 1. Remove the extension, and $U if already installed. + ac_script='s/\$U\././;s/\.o$//;s/\.obj$//' + ac_i=`$as_echo "$ac_i" | sed "$ac_script"` + # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR + # will be set to the directory where LIBOBJS objects are built. + as_fn_append ac_libobjs " \${LIBOBJDIR}$ac_i\$U.$ac_objext" + as_fn_append ac_ltlibobjs " \${LIBOBJDIR}$ac_i"'$U.lo' +done +LIBOBJS=$ac_libobjs + +LTLIBOBJS=$ac_ltlibobjs + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking that generated files are newer than configure" >&5 +$as_echo_n "checking that generated files are newer than configure... " >&6; } + if test -n "$am_sleep_pid"; then + # Hide warnings about reused PIDs. 
+ wait $am_sleep_pid 2>/dev/null + fi + { $as_echo "$as_me:${as_lineno-$LINENO}: result: done" >&5 +$as_echo "done" >&6; } + if test -n "$EXEEXT"; then + am__EXEEXT_TRUE= + am__EXEEXT_FALSE='#' +else + am__EXEEXT_TRUE='#' + am__EXEEXT_FALSE= +fi + +if test -z "${BUILD_TESTS_TRUE}" && test -z "${BUILD_TESTS_FALSE}"; then + as_fn_error $? "conditional \"BUILD_TESTS\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${STATIC_LIBTS_TRUE}" && test -z "${STATIC_LIBTS_FALSE}"; then + as_fn_error $? "conditional \"STATIC_LIBTS\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${BUILD_WCCP_TRUE}" && test -z "${BUILD_WCCP_FALSE}"; then + as_fn_error $? "conditional \"BUILD_WCCP\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${BUILD_INTERIM_CACHE_TRUE}" && test -z "${BUILD_INTERIM_CACHE_FALSE}"; then + as_fn_error $? "conditional \"BUILD_INTERIM_CACHE\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${BUILD_SPDY_TRUE}" && test -z "${BUILD_SPDY_FALSE}"; then + as_fn_error $? "conditional \"BUILD_SPDY\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${BUILD_EXPERIMENTAL_PLUGINS_TRUE}" && test -z "${BUILD_EXPERIMENTAL_PLUGINS_FALSE}"; then + as_fn_error $? "conditional \"BUILD_EXPERIMENTAL_PLUGINS\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${BUILD_EXAMPLE_PLUGINS_TRUE}" && test -z "${BUILD_EXAMPLE_PLUGINS_FALSE}"; then + as_fn_error $? "conditional \"BUILD_EXAMPLE_PLUGINS\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${BUILD_TEST_TOOLS_TRUE}" && test -z "${BUILD_TEST_TOOLS_FALSE}"; then + as_fn_error $? 
"conditional \"BUILD_TEST_TOOLS\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${ENABLE_CPPAPI_TRUE}" && test -z "${ENABLE_CPPAPI_FALSE}"; then + as_fn_error $? "conditional \"ENABLE_CPPAPI\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${BUILD_LUAJIT_TRUE}" && test -z "${BUILD_LUAJIT_FALSE}"; then + as_fn_error $? "conditional \"BUILD_LUAJIT\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${AMDEP_TRUE}" && test -z "${AMDEP_FALSE}"; then + as_fn_error $? "conditional \"AMDEP\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${am__fastdepCC_TRUE}" && test -z "${am__fastdepCC_FALSE}"; then + as_fn_error $? "conditional \"am__fastdepCC\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${am__fastdepCXX_TRUE}" && test -z "${am__fastdepCXX_FALSE}"; then + as_fn_error $? "conditional \"am__fastdepCXX\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${am__fastdepCCAS_TRUE}" && test -z "${am__fastdepCCAS_FALSE}"; then + as_fn_error $? "conditional \"am__fastdepCCAS\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${BUILD_HAVE_CXX_11_TRUE}" && test -z "${BUILD_HAVE_CXX_11_FALSE}"; then + as_fn_error $? "conditional \"BUILD_HAVE_CXX_11\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${BUILD_MANPAGES_TRUE}" && test -z "${BUILD_MANPAGES_FALSE}"; then + as_fn_error $? "conditional \"BUILD_MANPAGES\" was never defined. +Usually this means the macro was only invoked conditionally." 
"$LINENO" 5 +fi +if test -z "${BUILD_MANPAGES_TRUE}" && test -z "${BUILD_MANPAGES_FALSE}"; then + as_fn_error $? "conditional \"BUILD_MANPAGES\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${BUILD_PERL_LIB_TRUE}" && test -z "${BUILD_PERL_LIB_FALSE}"; then + as_fn_error $? "conditional \"BUILD_PERL_LIB\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${BUILD_PERL_LIB_TRUE}" && test -z "${BUILD_PERL_LIB_FALSE}"; then + as_fn_error $? "conditional \"BUILD_PERL_LIB\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${BUILD_HAVE_LIBCXX_TRUE}" && test -z "${BUILD_HAVE_LIBCXX_FALSE}"; then + as_fn_error $? "conditional \"BUILD_HAVE_LIBCXX\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${BUILD_HAVE_LIBCXX_TRUE}" && test -z "${BUILD_HAVE_LIBCXX_FALSE}"; then + as_fn_error $? "conditional \"BUILD_HAVE_LIBCXX\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${BUILD_TSTOP_TRUE}" && test -z "${BUILD_TSTOP_FALSE}"; then + as_fn_error $? "conditional \"BUILD_TSTOP\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${BUILD_TSTOP_TRUE}" && test -z "${BUILD_TSTOP_FALSE}"; then + as_fn_error $? "conditional \"BUILD_TSTOP\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${BUILD_TSTOP_TRUE}" && test -z "${BUILD_TSTOP_FALSE}"; then + as_fn_error $? "conditional \"BUILD_TSTOP\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${BUILD_TSTOP_TRUE}" && test -z "${BUILD_TSTOP_FALSE}"; then + as_fn_error $? "conditional \"BUILD_TSTOP\" was never defined. 
+Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${BUILD_TSTOP_TRUE}" && test -z "${BUILD_TSTOP_FALSE}"; then + as_fn_error $? "conditional \"BUILD_TSTOP\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${BUILD_HAVE_BOOST_TRUE}" && test -z "${BUILD_HAVE_BOOST_FALSE}"; then + as_fn_error $? "conditional \"BUILD_HAVE_BOOST\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${BUILD_HAVE_BOOST_TRUE}" && test -z "${BUILD_HAVE_BOOST_FALSE}"; then + as_fn_error $? "conditional \"BUILD_HAVE_BOOST\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${BUILD_GEOIP_PLUGIN_TRUE}" && test -z "${BUILD_GEOIP_PLUGIN_FALSE}"; then + as_fn_error $? "conditional \"BUILD_GEOIP_PLUGIN\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${BUILD_HEALTHCHECK_PLUGIN_TRUE}" && test -z "${BUILD_HEALTHCHECK_PLUGIN_FALSE}"; then + as_fn_error $? "conditional \"BUILD_HEALTHCHECK_PLUGIN\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${BUILD_HTTP_LOAD_TRUE}" && test -z "${BUILD_HTTP_LOAD_FALSE}"; then + as_fn_error $? "conditional \"BUILD_HTTP_LOAD\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${BUILD_REMAP_STATS_PLUGIN_TRUE}" && test -z "${BUILD_REMAP_STATS_PLUGIN_FALSE}"; then + as_fn_error $? "conditional \"BUILD_REMAP_STATS_PLUGIN\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${BUILD_COLLAPSED_CONNECTION_PLUGIN_TRUE}" && test -z "${BUILD_COLLAPSED_CONNECTION_PLUGIN_FALSE}"; then + as_fn_error $? "conditional \"BUILD_COLLAPSED_CONNECTION_PLUGIN\" was never defined. 
+Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi + +: "${CONFIG_STATUS=./config.status}" +ac_write_fail=0 +ac_clean_files_save=$ac_clean_files +ac_clean_files="$ac_clean_files $CONFIG_STATUS" +{ $as_echo "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5 +$as_echo "$as_me: creating $CONFIG_STATUS" >&6;} +as_write_fail=0 +cat >$CONFIG_STATUS <<_ASEOF || as_write_fail=1 +#! $SHELL +# Generated by $as_me. +# Run this file to recreate the current configuration. +# Compiler output produced by configure, useful for debugging +# configure, is in config.log if it exists. + +debug=false +ac_cs_recheck=false +ac_cs_silent=false + +SHELL=\${CONFIG_SHELL-$SHELL} +export SHELL +_ASEOF +cat >>$CONFIG_STATUS <<\_ASEOF || as_write_fail=1 +## -------------------- ## +## M4sh Initialization. ## +## -------------------- ## + +# Be more Bourne compatible +DUALCASE=1; export DUALCASE # for MKS sh +if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : + emulate sh + NULLCMD=: + # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. + alias -g '${1+"$@"}'='"$@"' + setopt NO_GLOB_SUBST +else + case `(set -o) 2>/dev/null` in #( + *posix*) : + set -o posix ;; #( + *) : + ;; +esac +fi + + +as_nl=' +' +export as_nl +# Printing a long string crashes Solaris 7 /usr/bin/printf. +as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo +# Prefer a ksh shell builtin over an external printf program on Solaris, +# but without wasting forks for bash or zsh. 
+if test -z "$BASH_VERSION$ZSH_VERSION" \ + && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then + as_echo='print -r --' + as_echo_n='print -rn --' +elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then + as_echo='printf %s\n' + as_echo_n='printf %s' +else + if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then + as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' + as_echo_n='/usr/ucb/echo -n' + else + as_echo_body='eval expr "X$1" : "X\\(.*\\)"' + as_echo_n_body='eval + arg=$1; + case $arg in #( + *"$as_nl"*) + expr "X$arg" : "X\\(.*\\)$as_nl"; + arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; + esac; + expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" + ' + export as_echo_n_body + as_echo_n='sh -c $as_echo_n_body as_echo' + fi + export as_echo_body + as_echo='sh -c $as_echo_body as_echo' +fi + +# The user is always right. +if test "${PATH_SEPARATOR+set}" != set; then + PATH_SEPARATOR=: + (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { + (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || + PATH_SEPARATOR=';' + } +fi + + +# IFS +# We need space, tab and new line, in precisely that order. Quoting is +# there to prevent editors from complaining about space-tab. +# (If _AS_PATH_WALK were called with IFS unset, it would disable word +# splitting by setting IFS to empty value.) +IFS=" "" $as_nl" + +# Find who we are. Look in the path if we contain no directory separator. +as_myself= +case $0 in #(( + *[\\/]* ) as_myself=$0 ;; + *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break + done +IFS=$as_save_IFS + + ;; +esac +# We did not find ourselves, most probably we were run as `sh COMMAND' +# in which case we are not to be found in the path. +if test "x$as_myself" = x; then + as_myself=$0 +fi +if test ! 
-f "$as_myself"; then + $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 + exit 1 +fi + +# Unset variables that we do not need and which cause bugs (e.g. in +# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" +# suppresses any "Segmentation fault" message there. '((' could +# trigger a bug in pdksh 5.2.14. +for as_var in BASH_ENV ENV MAIL MAILPATH +do eval test x\${$as_var+set} = xset \ + && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : +done +PS1='$ ' +PS2='> ' +PS4='+ ' + +# NLS nuisances. +LC_ALL=C +export LC_ALL +LANGUAGE=C +export LANGUAGE + +# CDPATH. +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + + +# as_fn_error STATUS ERROR [LINENO LOG_FD] +# ---------------------------------------- +# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are +# provided, also output the error to LOG_FD, referencing LINENO. Then exit the +# script with STATUS, using 1 if that was 0. +as_fn_error () +{ + as_status=$1; test $as_status -eq 0 && as_status=1 + if test "$4"; then + as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 + fi + $as_echo "$as_me: error: $2" >&2 + as_fn_exit $as_status +} # as_fn_error + + +# as_fn_set_status STATUS +# ----------------------- +# Set $? to STATUS, without forking. +as_fn_set_status () +{ + return $1 +} # as_fn_set_status + +# as_fn_exit STATUS +# ----------------- +# Exit the shell with STATUS, even in a "trap 0" or "set -e" context. +as_fn_exit () +{ + set +e + as_fn_set_status $1 + exit $1 +} # as_fn_exit + +# as_fn_unset VAR +# --------------- +# Portably unset VAR. +as_fn_unset () +{ + { eval $1=; unset $1;} +} +as_unset=as_fn_unset +# as_fn_append VAR VALUE +# ---------------------- +# Append the text in VALUE to the end of the definition contained in VAR. 
Take +# advantage of any shell optimizations that allow amortized linear growth over +# repeated appends, instead of the typical quadratic growth present in naive +# implementations. +if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : + eval 'as_fn_append () + { + eval $1+=\$2 + }' +else + as_fn_append () + { + eval $1=\$$1\$2 + } +fi # as_fn_append + +# as_fn_arith ARG... +# ------------------ +# Perform arithmetic evaluation on the ARGs, and store the result in the +# global $as_val. Take advantage of shells that can avoid forks. The arguments +# must be portable across $(()) and expr. +if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : + eval 'as_fn_arith () + { + as_val=$(( $* )) + }' +else + as_fn_arith () + { + as_val=`expr "$@" || test $? -eq 1` + } +fi # as_fn_arith + + +if expr a : '\(a\)' >/dev/null 2>&1 && + test "X`expr 00001 : '.*\(...\)'`" = X001; then + as_expr=expr +else + as_expr=false +fi + +if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then + as_basename=basename +else + as_basename=false +fi + +if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then + as_dirname=dirname +else + as_dirname=false +fi + +as_me=`$as_basename -- "$0" || +$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ + X"$0" : 'X\(//\)$' \| \ + X"$0" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X/"$0" | + sed '/^.*\/\([^/][^/]*\)\/*$/{ + s//\1/ + q + } + /^X\/\(\/\/\)$/{ + s//\1/ + q + } + /^X\/\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + +# Avoid depending upon Character Ranges. +as_cr_letters='abcdefghijklmnopqrstuvwxyz' +as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' +as_cr_Letters=$as_cr_letters$as_cr_LETTERS +as_cr_digits='0123456789' +as_cr_alnum=$as_cr_Letters$as_cr_digits + +ECHO_C= ECHO_N= ECHO_T= +case `echo -n x` in #((((( +-n*) + case `echo 'xy\c'` in + *c*) ECHO_T=' ';; # ECHO_T is single tab character. 
+ xy) ECHO_C='\c';; + *) echo `echo ksh88 bug on AIX 6.1` > /dev/null + ECHO_T=' ';; + esac;; +*) + ECHO_N='-n';; +esac + +rm -f conf$$ conf$$.exe conf$$.file +if test -d conf$$.dir; then + rm -f conf$$.dir/conf$$.file +else + rm -f conf$$.dir + mkdir conf$$.dir 2>/dev/null +fi +if (echo >conf$$.file) 2>/dev/null; then + if ln -s conf$$.file conf$$ 2>/dev/null; then + as_ln_s='ln -s' + # ... but there are two gotchas: + # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. + # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. + # In both cases, we have to default to `cp -pR'. + ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || + as_ln_s='cp -pR' + elif ln conf$$.file conf$$ 2>/dev/null; then + as_ln_s=ln + else + as_ln_s='cp -pR' + fi +else + as_ln_s='cp -pR' +fi +rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file +rmdir conf$$.dir 2>/dev/null + + +# as_fn_mkdir_p +# ------------- +# Create "$as_dir" as a directory, including parents if necessary. +as_fn_mkdir_p () +{ + + case $as_dir in #( + -*) as_dir=./$as_dir;; + esac + test -d "$as_dir" || eval $as_mkdir_p || { + as_dirs= + while :; do + case $as_dir in #( + *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( + *) as_qdir=$as_dir;; + esac + as_dirs="'$as_qdir' $as_dirs" + as_dir=`$as_dirname -- "$as_dir" || +$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_dir" : 'X\(//\)[^/]' \| \ + X"$as_dir" : 'X\(//\)$' \| \ + X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$as_dir" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + test -d "$as_dir" && break + done + test -z "$as_dirs" || eval "mkdir $as_dirs" + } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" + + +} # as_fn_mkdir_p +if mkdir -p . 
2>/dev/null; then + as_mkdir_p='mkdir -p "$as_dir"' +else + test -d ./-p && rmdir ./-p + as_mkdir_p=false +fi + + +# as_fn_executable_p FILE +# ----------------------- +# Test if FILE is an executable regular file. +as_fn_executable_p () +{ + test -f "$1" && test -x "$1" +} # as_fn_executable_p +as_test_x='test -x' +as_executable_p=as_fn_executable_p + +# Sed expression to map a string onto a valid CPP name. +as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" + +# Sed expression to map a string onto a valid variable name. +as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" + + +exec 6>&1 +## ----------------------------------- ## +## Main body of $CONFIG_STATUS script. ## +## ----------------------------------- ## +_ASEOF +test $as_write_fail = 0 && chmod +x $CONFIG_STATUS || ac_write_fail=1 + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +# Save the log message, to keep $0 and so on meaningful, and to +# report actual input values of CONFIG_FILES etc. instead of their +# values after options handling. +ac_log=" +This file was extended by Apache Traffic Server $as_me 5.0.1, which was +generated by GNU Autoconf 2.69. Invocation command line was + + CONFIG_FILES = $CONFIG_FILES + CONFIG_HEADERS = $CONFIG_HEADERS + CONFIG_LINKS = $CONFIG_LINKS + CONFIG_COMMANDS = $CONFIG_COMMANDS + $ $0 $@ + +on `(hostname || uname -n) 2>/dev/null | sed 1q` +" + +_ACEOF + +case $ac_config_files in *" +"*) set x $ac_config_files; shift; ac_config_files=$*;; +esac + +case $ac_config_headers in *" +"*) set x $ac_config_headers; shift; ac_config_headers=$*;; +esac + + +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +# Files that config.status was made for. 
+config_files="$ac_config_files" +config_headers="$ac_config_headers" +config_commands="$ac_config_commands" + +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +ac_cs_usage="\ +\`$as_me' instantiates files and other configuration actions +from templates according to the current configuration. Unless the files +and actions are specified as TAGs, all are instantiated by default. + +Usage: $0 [OPTION]... [TAG]... + + -h, --help print this help, then exit + -V, --version print version number and configuration settings, then exit + --config print configuration, then exit + -q, --quiet, --silent + do not print progress messages + -d, --debug don't remove temporary files + --recheck update $as_me by reconfiguring in the same conditions + --file=FILE[:TEMPLATE] + instantiate the configuration file FILE + --header=FILE[:TEMPLATE] + instantiate the configuration header FILE + +Configuration files: +$config_files + +Configuration headers: +$config_headers + +Configuration commands: +$config_commands + +Report bugs to . +Apache Traffic Server home page: ." + +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" +ac_cs_version="\\ +Apache Traffic Server config.status 5.0.1 +configured by $0, generated by GNU Autoconf 2.69, + with options \\"\$ac_cs_config\\" + +Copyright (C) 2012 Free Software Foundation, Inc. +This config.status script is free software; the Free Software Foundation +gives unlimited permission to copy, distribute and modify it." + +ac_pwd='$ac_pwd' +srcdir='$srcdir' +INSTALL='$INSTALL' +MKDIR_P='$MKDIR_P' +AWK='$AWK' +test -n "\$AWK" || AWK=awk +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +# The default lists apply if the user does not specify any file. 
+ac_need_defaults=: +while test $# != 0 +do + case $1 in + --*=?*) + ac_option=`expr "X$1" : 'X\([^=]*\)='` + ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'` + ac_shift=: + ;; + --*=) + ac_option=`expr "X$1" : 'X\([^=]*\)='` + ac_optarg= + ac_shift=: + ;; + *) + ac_option=$1 + ac_optarg=$2 + ac_shift=shift + ;; + esac + + case $ac_option in + # Handling of the options. + -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r) + ac_cs_recheck=: ;; + --version | --versio | --versi | --vers | --ver | --ve | --v | -V ) + $as_echo "$ac_cs_version"; exit ;; + --config | --confi | --conf | --con | --co | --c ) + $as_echo "$ac_cs_config"; exit ;; + --debug | --debu | --deb | --de | --d | -d ) + debug=: ;; + --file | --fil | --fi | --f ) + $ac_shift + case $ac_optarg in + *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; + '') as_fn_error $? "missing file argument" ;; + esac + as_fn_append CONFIG_FILES " '$ac_optarg'" + ac_need_defaults=false;; + --header | --heade | --head | --hea ) + $ac_shift + case $ac_optarg in + *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; + esac + as_fn_append CONFIG_HEADERS " '$ac_optarg'" + ac_need_defaults=false;; + --he | --h) + # Conflict between --help and --header + as_fn_error $? "ambiguous option: \`$1' +Try \`$0 --help' for more information.";; + --help | --hel | -h ) + $as_echo "$ac_cs_usage"; exit ;; + -q | -quiet | --quiet | --quie | --qui | --qu | --q \ + | -silent | --silent | --silen | --sile | --sil | --si | --s) + ac_cs_silent=: ;; + + # This is an error. + -*) as_fn_error $? "unrecognized option: \`$1' +Try \`$0 --help' for more information." 
;; + + *) as_fn_append ac_config_targets " $1" + ac_need_defaults=false ;; + + esac + shift +done + +ac_configure_extra_args= + +if $ac_cs_silent; then + exec 6>/dev/null + ac_configure_extra_args="$ac_configure_extra_args --silent" +fi + +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +if \$ac_cs_recheck; then + set X $SHELL '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion + shift + \$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6 + CONFIG_SHELL='$SHELL' + export CONFIG_SHELL + exec "\$@" +fi + +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +exec 5>>config.log +{ + echo + sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX +## Running $as_me. ## +_ASBOX + $as_echo "$ac_log" +} >&5 + +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +# +# INIT-COMMANDS +# +AMDEP_TRUE="$AMDEP_TRUE" ac_aux_dir="$ac_aux_dir" + + +# The HP-UX ksh and POSIX shell print the target directory to stdout +# if CDPATH is set. +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + +sed_quote_subst='$sed_quote_subst' +double_quote_subst='$double_quote_subst' +delay_variable_subst='$delay_variable_subst' +enable_static='`$ECHO "$enable_static" | $SED "$delay_single_quote_subst"`' +macro_version='`$ECHO "$macro_version" | $SED "$delay_single_quote_subst"`' +macro_revision='`$ECHO "$macro_revision" | $SED "$delay_single_quote_subst"`' +enable_shared='`$ECHO "$enable_shared" | $SED "$delay_single_quote_subst"`' +pic_mode='`$ECHO "$pic_mode" | $SED "$delay_single_quote_subst"`' +enable_fast_install='`$ECHO "$enable_fast_install" | $SED "$delay_single_quote_subst"`' +SHELL='`$ECHO "$SHELL" | $SED "$delay_single_quote_subst"`' +ECHO='`$ECHO "$ECHO" | $SED "$delay_single_quote_subst"`' +PATH_SEPARATOR='`$ECHO "$PATH_SEPARATOR" | $SED "$delay_single_quote_subst"`' +host_alias='`$ECHO "$host_alias" | $SED "$delay_single_quote_subst"`' +host='`$ECHO "$host" | $SED "$delay_single_quote_subst"`' +host_os='`$ECHO "$host_os" | $SED 
"$delay_single_quote_subst"`' +build_alias='`$ECHO "$build_alias" | $SED "$delay_single_quote_subst"`' +build='`$ECHO "$build" | $SED "$delay_single_quote_subst"`' +build_os='`$ECHO "$build_os" | $SED "$delay_single_quote_subst"`' +SED='`$ECHO "$SED" | $SED "$delay_single_quote_subst"`' +Xsed='`$ECHO "$Xsed" | $SED "$delay_single_quote_subst"`' +GREP='`$ECHO "$GREP" | $SED "$delay_single_quote_subst"`' +EGREP='`$ECHO "$EGREP" | $SED "$delay_single_quote_subst"`' +FGREP='`$ECHO "$FGREP" | $SED "$delay_single_quote_subst"`' +LD='`$ECHO "$LD" | $SED "$delay_single_quote_subst"`' +NM='`$ECHO "$NM" | $SED "$delay_single_quote_subst"`' +LN_S='`$ECHO "$LN_S" | $SED "$delay_single_quote_subst"`' +max_cmd_len='`$ECHO "$max_cmd_len" | $SED "$delay_single_quote_subst"`' +ac_objext='`$ECHO "$ac_objext" | $SED "$delay_single_quote_subst"`' +exeext='`$ECHO "$exeext" | $SED "$delay_single_quote_subst"`' +lt_unset='`$ECHO "$lt_unset" | $SED "$delay_single_quote_subst"`' +lt_SP2NL='`$ECHO "$lt_SP2NL" | $SED "$delay_single_quote_subst"`' +lt_NL2SP='`$ECHO "$lt_NL2SP" | $SED "$delay_single_quote_subst"`' +lt_cv_to_host_file_cmd='`$ECHO "$lt_cv_to_host_file_cmd" | $SED "$delay_single_quote_subst"`' +lt_cv_to_tool_file_cmd='`$ECHO "$lt_cv_to_tool_file_cmd" | $SED "$delay_single_quote_subst"`' +reload_flag='`$ECHO "$reload_flag" | $SED "$delay_single_quote_subst"`' +reload_cmds='`$ECHO "$reload_cmds" | $SED "$delay_single_quote_subst"`' +OBJDUMP='`$ECHO "$OBJDUMP" | $SED "$delay_single_quote_subst"`' +deplibs_check_method='`$ECHO "$deplibs_check_method" | $SED "$delay_single_quote_subst"`' +file_magic_cmd='`$ECHO "$file_magic_cmd" | $SED "$delay_single_quote_subst"`' +file_magic_glob='`$ECHO "$file_magic_glob" | $SED "$delay_single_quote_subst"`' +want_nocaseglob='`$ECHO "$want_nocaseglob" | $SED "$delay_single_quote_subst"`' +DLLTOOL='`$ECHO "$DLLTOOL" | $SED "$delay_single_quote_subst"`' +sharedlib_from_linklib_cmd='`$ECHO "$sharedlib_from_linklib_cmd" | $SED 
"$delay_single_quote_subst"`' +AR='`$ECHO "$AR" | $SED "$delay_single_quote_subst"`' +AR_FLAGS='`$ECHO "$AR_FLAGS" | $SED "$delay_single_quote_subst"`' +archiver_list_spec='`$ECHO "$archiver_list_spec" | $SED "$delay_single_quote_subst"`' +STRIP='`$ECHO "$STRIP" | $SED "$delay_single_quote_subst"`' +RANLIB='`$ECHO "$RANLIB" | $SED "$delay_single_quote_subst"`' +old_postinstall_cmds='`$ECHO "$old_postinstall_cmds" | $SED "$delay_single_quote_subst"`' +old_postuninstall_cmds='`$ECHO "$old_postuninstall_cmds" | $SED "$delay_single_quote_subst"`' +old_archive_cmds='`$ECHO "$old_archive_cmds" | $SED "$delay_single_quote_subst"`' +lock_old_archive_extraction='`$ECHO "$lock_old_archive_extraction" | $SED "$delay_single_quote_subst"`' +CC='`$ECHO "$CC" | $SED "$delay_single_quote_subst"`' +CFLAGS='`$ECHO "$CFLAGS" | $SED "$delay_single_quote_subst"`' +compiler='`$ECHO "$compiler" | $SED "$delay_single_quote_subst"`' +GCC='`$ECHO "$GCC" | $SED "$delay_single_quote_subst"`' +lt_cv_sys_global_symbol_pipe='`$ECHO "$lt_cv_sys_global_symbol_pipe" | $SED "$delay_single_quote_subst"`' +lt_cv_sys_global_symbol_to_cdecl='`$ECHO "$lt_cv_sys_global_symbol_to_cdecl" | $SED "$delay_single_quote_subst"`' +lt_cv_sys_global_symbol_to_c_name_address='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address" | $SED "$delay_single_quote_subst"`' +lt_cv_sys_global_symbol_to_c_name_address_lib_prefix='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address_lib_prefix" | $SED "$delay_single_quote_subst"`' +nm_file_list_spec='`$ECHO "$nm_file_list_spec" | $SED "$delay_single_quote_subst"`' +lt_sysroot='`$ECHO "$lt_sysroot" | $SED "$delay_single_quote_subst"`' +objdir='`$ECHO "$objdir" | $SED "$delay_single_quote_subst"`' +MAGIC_CMD='`$ECHO "$MAGIC_CMD" | $SED "$delay_single_quote_subst"`' +lt_prog_compiler_no_builtin_flag='`$ECHO "$lt_prog_compiler_no_builtin_flag" | $SED "$delay_single_quote_subst"`' +lt_prog_compiler_pic='`$ECHO "$lt_prog_compiler_pic" | $SED "$delay_single_quote_subst"`' 
+lt_prog_compiler_wl='`$ECHO "$lt_prog_compiler_wl" | $SED "$delay_single_quote_subst"`' +lt_prog_compiler_static='`$ECHO "$lt_prog_compiler_static" | $SED "$delay_single_quote_subst"`' +lt_cv_prog_compiler_c_o='`$ECHO "$lt_cv_prog_compiler_c_o" | $SED "$delay_single_quote_subst"`' +need_locks='`$ECHO "$need_locks" | $SED "$delay_single_quote_subst"`' +MANIFEST_TOOL='`$ECHO "$MANIFEST_TOOL" | $SED "$delay_single_quote_subst"`' +DSYMUTIL='`$ECHO "$DSYMUTIL" | $SED "$delay_single_quote_subst"`' +NMEDIT='`$ECHO "$NMEDIT" | $SED "$delay_single_quote_subst"`' +LIPO='`$ECHO "$LIPO" | $SED "$delay_single_quote_subst"`' +OTOOL='`$ECHO "$OTOOL" | $SED "$delay_single_quote_subst"`' +OTOOL64='`$ECHO "$OTOOL64" | $SED "$delay_single_quote_subst"`' +libext='`$ECHO "$libext" | $SED "$delay_single_quote_subst"`' +shrext_cmds='`$ECHO "$shrext_cmds" | $SED "$delay_single_quote_subst"`' +extract_expsyms_cmds='`$ECHO "$extract_expsyms_cmds" | $SED "$delay_single_quote_subst"`' +archive_cmds_need_lc='`$ECHO "$archive_cmds_need_lc" | $SED "$delay_single_quote_subst"`' +enable_shared_with_static_runtimes='`$ECHO "$enable_shared_with_static_runtimes" | $SED "$delay_single_quote_subst"`' +export_dynamic_flag_spec='`$ECHO "$export_dynamic_flag_spec" | $SED "$delay_single_quote_subst"`' +whole_archive_flag_spec='`$ECHO "$whole_archive_flag_spec" | $SED "$delay_single_quote_subst"`' +compiler_needs_object='`$ECHO "$compiler_needs_object" | $SED "$delay_single_quote_subst"`' +old_archive_from_new_cmds='`$ECHO "$old_archive_from_new_cmds" | $SED "$delay_single_quote_subst"`' +old_archive_from_expsyms_cmds='`$ECHO "$old_archive_from_expsyms_cmds" | $SED "$delay_single_quote_subst"`' +archive_cmds='`$ECHO "$archive_cmds" | $SED "$delay_single_quote_subst"`' +archive_expsym_cmds='`$ECHO "$archive_expsym_cmds" | $SED "$delay_single_quote_subst"`' +module_cmds='`$ECHO "$module_cmds" | $SED "$delay_single_quote_subst"`' +module_expsym_cmds='`$ECHO "$module_expsym_cmds" | $SED 
"$delay_single_quote_subst"`' +with_gnu_ld='`$ECHO "$with_gnu_ld" | $SED "$delay_single_quote_subst"`' +allow_undefined_flag='`$ECHO "$allow_undefined_flag" | $SED "$delay_single_quote_subst"`' +no_undefined_flag='`$ECHO "$no_undefined_flag" | $SED "$delay_single_quote_subst"`' +hardcode_libdir_flag_spec='`$ECHO "$hardcode_libdir_flag_spec" | $SED "$delay_single_quote_subst"`' +hardcode_libdir_separator='`$ECHO "$hardcode_libdir_separator" | $SED "$delay_single_quote_subst"`' +hardcode_direct='`$ECHO "$hardcode_direct" | $SED "$delay_single_quote_subst"`' +hardcode_direct_absolute='`$ECHO "$hardcode_direct_absolute" | $SED "$delay_single_quote_subst"`' +hardcode_minus_L='`$ECHO "$hardcode_minus_L" | $SED "$delay_single_quote_subst"`' +hardcode_shlibpath_var='`$ECHO "$hardcode_shlibpath_var" | $SED "$delay_single_quote_subst"`' +hardcode_automatic='`$ECHO "$hardcode_automatic" | $SED "$delay_single_quote_subst"`' +inherit_rpath='`$ECHO "$inherit_rpath" | $SED "$delay_single_quote_subst"`' +link_all_deplibs='`$ECHO "$link_all_deplibs" | $SED "$delay_single_quote_subst"`' +always_export_symbols='`$ECHO "$always_export_symbols" | $SED "$delay_single_quote_subst"`' +export_symbols_cmds='`$ECHO "$export_symbols_cmds" | $SED "$delay_single_quote_subst"`' +exclude_expsyms='`$ECHO "$exclude_expsyms" | $SED "$delay_single_quote_subst"`' +include_expsyms='`$ECHO "$include_expsyms" | $SED "$delay_single_quote_subst"`' +prelink_cmds='`$ECHO "$prelink_cmds" | $SED "$delay_single_quote_subst"`' +postlink_cmds='`$ECHO "$postlink_cmds" | $SED "$delay_single_quote_subst"`' +file_list_spec='`$ECHO "$file_list_spec" | $SED "$delay_single_quote_subst"`' +variables_saved_for_relink='`$ECHO "$variables_saved_for_relink" | $SED "$delay_single_quote_subst"`' +need_lib_prefix='`$ECHO "$need_lib_prefix" | $SED "$delay_single_quote_subst"`' +need_version='`$ECHO "$need_version" | $SED "$delay_single_quote_subst"`' +version_type='`$ECHO "$version_type" | $SED "$delay_single_quote_subst"`' 
+runpath_var='`$ECHO "$runpath_var" | $SED "$delay_single_quote_subst"`' +shlibpath_var='`$ECHO "$shlibpath_var" | $SED "$delay_single_quote_subst"`' +shlibpath_overrides_runpath='`$ECHO "$shlibpath_overrides_runpath" | $SED "$delay_single_quote_subst"`' +libname_spec='`$ECHO "$libname_spec" | $SED "$delay_single_quote_subst"`' +library_names_spec='`$ECHO "$library_names_spec" | $SED "$delay_single_quote_subst"`' +soname_spec='`$ECHO "$soname_spec" | $SED "$delay_single_quote_subst"`' +install_override_mode='`$ECHO "$install_override_mode" | $SED "$delay_single_quote_subst"`' +postinstall_cmds='`$ECHO "$postinstall_cmds" | $SED "$delay_single_quote_subst"`' +postuninstall_cmds='`$ECHO "$postuninstall_cmds" | $SED "$delay_single_quote_subst"`' +finish_cmds='`$ECHO "$finish_cmds" | $SED "$delay_single_quote_subst"`' +finish_eval='`$ECHO "$finish_eval" | $SED "$delay_single_quote_subst"`' +hardcode_into_libs='`$ECHO "$hardcode_into_libs" | $SED "$delay_single_quote_subst"`' +sys_lib_search_path_spec='`$ECHO "$sys_lib_search_path_spec" | $SED "$delay_single_quote_subst"`' +sys_lib_dlsearch_path_spec='`$ECHO "$sys_lib_dlsearch_path_spec" | $SED "$delay_single_quote_subst"`' +hardcode_action='`$ECHO "$hardcode_action" | $SED "$delay_single_quote_subst"`' +enable_dlopen='`$ECHO "$enable_dlopen" | $SED "$delay_single_quote_subst"`' +enable_dlopen_self='`$ECHO "$enable_dlopen_self" | $SED "$delay_single_quote_subst"`' +enable_dlopen_self_static='`$ECHO "$enable_dlopen_self_static" | $SED "$delay_single_quote_subst"`' +old_striplib='`$ECHO "$old_striplib" | $SED "$delay_single_quote_subst"`' +striplib='`$ECHO "$striplib" | $SED "$delay_single_quote_subst"`' +compiler_lib_search_dirs='`$ECHO "$compiler_lib_search_dirs" | $SED "$delay_single_quote_subst"`' +predep_objects='`$ECHO "$predep_objects" | $SED "$delay_single_quote_subst"`' +postdep_objects='`$ECHO "$postdep_objects" | $SED "$delay_single_quote_subst"`' +predeps='`$ECHO "$predeps" | $SED "$delay_single_quote_subst"`' 
+postdeps='`$ECHO "$postdeps" | $SED "$delay_single_quote_subst"`' +compiler_lib_search_path='`$ECHO "$compiler_lib_search_path" | $SED "$delay_single_quote_subst"`' +LD_CXX='`$ECHO "$LD_CXX" | $SED "$delay_single_quote_subst"`' +reload_flag_CXX='`$ECHO "$reload_flag_CXX" | $SED "$delay_single_quote_subst"`' +reload_cmds_CXX='`$ECHO "$reload_cmds_CXX" | $SED "$delay_single_quote_subst"`' +old_archive_cmds_CXX='`$ECHO "$old_archive_cmds_CXX" | $SED "$delay_single_quote_subst"`' +compiler_CXX='`$ECHO "$compiler_CXX" | $SED "$delay_single_quote_subst"`' +GCC_CXX='`$ECHO "$GCC_CXX" | $SED "$delay_single_quote_subst"`' +lt_prog_compiler_no_builtin_flag_CXX='`$ECHO "$lt_prog_compiler_no_builtin_flag_CXX" | $SED "$delay_single_quote_subst"`' +lt_prog_compiler_pic_CXX='`$ECHO "$lt_prog_compiler_pic_CXX" | $SED "$delay_single_quote_subst"`' +lt_prog_compiler_wl_CXX='`$ECHO "$lt_prog_compiler_wl_CXX" | $SED "$delay_single_quote_subst"`' +lt_prog_compiler_static_CXX='`$ECHO "$lt_prog_compiler_static_CXX" | $SED "$delay_single_quote_subst"`' +lt_cv_prog_compiler_c_o_CXX='`$ECHO "$lt_cv_prog_compiler_c_o_CXX" | $SED "$delay_single_quote_subst"`' +archive_cmds_need_lc_CXX='`$ECHO "$archive_cmds_need_lc_CXX" | $SED "$delay_single_quote_subst"`' +enable_shared_with_static_runtimes_CXX='`$ECHO "$enable_shared_with_static_runtimes_CXX" | $SED "$delay_single_quote_subst"`' +export_dynamic_flag_spec_CXX='`$ECHO "$export_dynamic_flag_spec_CXX" | $SED "$delay_single_quote_subst"`' +whole_archive_flag_spec_CXX='`$ECHO "$whole_archive_flag_spec_CXX" | $SED "$delay_single_quote_subst"`' +compiler_needs_object_CXX='`$ECHO "$compiler_needs_object_CXX" | $SED "$delay_single_quote_subst"`' +old_archive_from_new_cmds_CXX='`$ECHO "$old_archive_from_new_cmds_CXX" | $SED "$delay_single_quote_subst"`' +old_archive_from_expsyms_cmds_CXX='`$ECHO "$old_archive_from_expsyms_cmds_CXX" | $SED "$delay_single_quote_subst"`' +archive_cmds_CXX='`$ECHO "$archive_cmds_CXX" | $SED "$delay_single_quote_subst"`' 
+archive_expsym_cmds_CXX='`$ECHO "$archive_expsym_cmds_CXX" | $SED "$delay_single_quote_subst"`' +module_cmds_CXX='`$ECHO "$module_cmds_CXX" | $SED "$delay_single_quote_subst"`' +module_expsym_cmds_CXX='`$ECHO "$module_expsym_cmds_CXX" | $SED "$delay_single_quote_subst"`' +with_gnu_ld_CXX='`$ECHO "$with_gnu_ld_CXX" | $SED "$delay_single_quote_subst"`' +allow_undefined_flag_CXX='`$ECHO "$allow_undefined_flag_CXX" | $SED "$delay_single_quote_subst"`' +no_undefined_flag_CXX='`$ECHO "$no_undefined_flag_CXX" | $SED "$delay_single_quote_subst"`' +hardcode_libdir_flag_spec_CXX='`$ECHO "$hardcode_libdir_flag_spec_CXX" | $SED "$delay_single_quote_subst"`' +hardcode_libdir_separator_CXX='`$ECHO "$hardcode_libdir_separator_CXX" | $SED "$delay_single_quote_subst"`' +hardcode_direct_CXX='`$ECHO "$hardcode_direct_CXX" | $SED "$delay_single_quote_subst"`' +hardcode_direct_absolute_CXX='`$ECHO "$hardcode_direct_absolute_CXX" | $SED "$delay_single_quote_subst"`' +hardcode_minus_L_CXX='`$ECHO "$hardcode_minus_L_CXX" | $SED "$delay_single_quote_subst"`' +hardcode_shlibpath_var_CXX='`$ECHO "$hardcode_shlibpath_var_CXX" | $SED "$delay_single_quote_subst"`' +hardcode_automatic_CXX='`$ECHO "$hardcode_automatic_CXX" | $SED "$delay_single_quote_subst"`' +inherit_rpath_CXX='`$ECHO "$inherit_rpath_CXX" | $SED "$delay_single_quote_subst"`' +link_all_deplibs_CXX='`$ECHO "$link_all_deplibs_CXX" | $SED "$delay_single_quote_subst"`' +always_export_symbols_CXX='`$ECHO "$always_export_symbols_CXX" | $SED "$delay_single_quote_subst"`' +export_symbols_cmds_CXX='`$ECHO "$export_symbols_cmds_CXX" | $SED "$delay_single_quote_subst"`' +exclude_expsyms_CXX='`$ECHO "$exclude_expsyms_CXX" | $SED "$delay_single_quote_subst"`' +include_expsyms_CXX='`$ECHO "$include_expsyms_CXX" | $SED "$delay_single_quote_subst"`' +prelink_cmds_CXX='`$ECHO "$prelink_cmds_CXX" | $SED "$delay_single_quote_subst"`' +postlink_cmds_CXX='`$ECHO "$postlink_cmds_CXX" | $SED "$delay_single_quote_subst"`' +file_list_spec_CXX='`$ECHO 
"$file_list_spec_CXX" | $SED "$delay_single_quote_subst"`' +hardcode_action_CXX='`$ECHO "$hardcode_action_CXX" | $SED "$delay_single_quote_subst"`' +compiler_lib_search_dirs_CXX='`$ECHO "$compiler_lib_search_dirs_CXX" | $SED "$delay_single_quote_subst"`' +predep_objects_CXX='`$ECHO "$predep_objects_CXX" | $SED "$delay_single_quote_subst"`' +postdep_objects_CXX='`$ECHO "$postdep_objects_CXX" | $SED "$delay_single_quote_subst"`' +predeps_CXX='`$ECHO "$predeps_CXX" | $SED "$delay_single_quote_subst"`' +postdeps_CXX='`$ECHO "$postdeps_CXX" | $SED "$delay_single_quote_subst"`' +compiler_lib_search_path_CXX='`$ECHO "$compiler_lib_search_path_CXX" | $SED "$delay_single_quote_subst"`' + +LTCC='$LTCC' +LTCFLAGS='$LTCFLAGS' +compiler='$compiler_DEFAULT' + +# A function that is used when there is no print builtin or printf. +func_fallback_echo () +{ + eval 'cat <<_LTECHO_EOF +\$1 +_LTECHO_EOF' +} + +# Quote evaled strings. +for var in SHELL \ +ECHO \ +PATH_SEPARATOR \ +SED \ +GREP \ +EGREP \ +FGREP \ +LD \ +NM \ +LN_S \ +lt_SP2NL \ +lt_NL2SP \ +reload_flag \ +OBJDUMP \ +deplibs_check_method \ +file_magic_cmd \ +file_magic_glob \ +want_nocaseglob \ +DLLTOOL \ +sharedlib_from_linklib_cmd \ +AR \ +AR_FLAGS \ +archiver_list_spec \ +STRIP \ +RANLIB \ +CC \ +CFLAGS \ +compiler \ +lt_cv_sys_global_symbol_pipe \ +lt_cv_sys_global_symbol_to_cdecl \ +lt_cv_sys_global_symbol_to_c_name_address \ +lt_cv_sys_global_symbol_to_c_name_address_lib_prefix \ +nm_file_list_spec \ +lt_prog_compiler_no_builtin_flag \ +lt_prog_compiler_pic \ +lt_prog_compiler_wl \ +lt_prog_compiler_static \ +lt_cv_prog_compiler_c_o \ +need_locks \ +MANIFEST_TOOL \ +DSYMUTIL \ +NMEDIT \ +LIPO \ +OTOOL \ +OTOOL64 \ +shrext_cmds \ +export_dynamic_flag_spec \ +whole_archive_flag_spec \ +compiler_needs_object \ +with_gnu_ld \ +allow_undefined_flag \ +no_undefined_flag \ +hardcode_libdir_flag_spec \ +hardcode_libdir_separator \ +exclude_expsyms \ +include_expsyms \ +file_list_spec \ +variables_saved_for_relink \ 
+libname_spec \ +library_names_spec \ +soname_spec \ +install_override_mode \ +finish_eval \ +old_striplib \ +striplib \ +compiler_lib_search_dirs \ +predep_objects \ +postdep_objects \ +predeps \ +postdeps \ +compiler_lib_search_path \ +LD_CXX \ +reload_flag_CXX \ +compiler_CXX \ +lt_prog_compiler_no_builtin_flag_CXX \ +lt_prog_compiler_pic_CXX \ +lt_prog_compiler_wl_CXX \ +lt_prog_compiler_static_CXX \ +lt_cv_prog_compiler_c_o_CXX \ +export_dynamic_flag_spec_CXX \ +whole_archive_flag_spec_CXX \ +compiler_needs_object_CXX \ +with_gnu_ld_CXX \ +allow_undefined_flag_CXX \ +no_undefined_flag_CXX \ +hardcode_libdir_flag_spec_CXX \ +hardcode_libdir_separator_CXX \ +exclude_expsyms_CXX \ +include_expsyms_CXX \ +file_list_spec_CXX \ +compiler_lib_search_dirs_CXX \ +predep_objects_CXX \ +postdep_objects_CXX \ +predeps_CXX \ +postdeps_CXX \ +compiler_lib_search_path_CXX; do + case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in + *[\\\\\\\`\\"\\\$]*) + eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED \\"\\\$sed_quote_subst\\"\\\`\\\\\\"" + ;; + *) + eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" + ;; + esac +done + +# Double-quote double-evaled strings. 
+for var in reload_cmds \ +old_postinstall_cmds \ +old_postuninstall_cmds \ +old_archive_cmds \ +extract_expsyms_cmds \ +old_archive_from_new_cmds \ +old_archive_from_expsyms_cmds \ +archive_cmds \ +archive_expsym_cmds \ +module_cmds \ +module_expsym_cmds \ +export_symbols_cmds \ +prelink_cmds \ +postlink_cmds \ +postinstall_cmds \ +postuninstall_cmds \ +finish_cmds \ +sys_lib_search_path_spec \ +sys_lib_dlsearch_path_spec \ +reload_cmds_CXX \ +old_archive_cmds_CXX \ +old_archive_from_new_cmds_CXX \ +old_archive_from_expsyms_cmds_CXX \ +archive_cmds_CXX \ +archive_expsym_cmds_CXX \ +module_cmds_CXX \ +module_expsym_cmds_CXX \ +export_symbols_cmds_CXX \ +prelink_cmds_CXX \ +postlink_cmds_CXX; do + case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in + *[\\\\\\\`\\"\\\$]*) + eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED -e \\"\\\$double_quote_subst\\" -e \\"\\\$sed_quote_subst\\" -e \\"\\\$delay_variable_subst\\"\\\`\\\\\\"" + ;; + *) + eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" + ;; + esac +done + +ac_aux_dir='$ac_aux_dir' +xsi_shell='$xsi_shell' +lt_shell_append='$lt_shell_append' + +# See if we are running on zsh, and set the options which allow our +# commands through without removal of \ escapes INIT. +if test -n "\${ZSH_VERSION+set}" ; then + setopt NO_GLOB_SUBST +fi + + + PACKAGE='$PACKAGE' + VERSION='$VERSION' + TIMESTAMP='$TIMESTAMP' + RM='$RM' + ofile='$ofile' + + + + + + +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 + +# Handling of arguments. 
+for ac_config_target in $ac_config_targets +do + case $ac_config_target in + "lib/ts/ink_autoconf.h") CONFIG_HEADERS="$CONFIG_HEADERS lib/ts/ink_autoconf.h" ;; + "depfiles") CONFIG_COMMANDS="$CONFIG_COMMANDS depfiles" ;; + "libtool") CONFIG_COMMANDS="$CONFIG_COMMANDS libtool" ;; + "Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;; + "cmd/Makefile") CONFIG_FILES="$CONFIG_FILES cmd/Makefile" ;; + "cmd/traffic_cop/Makefile") CONFIG_FILES="$CONFIG_FILES cmd/traffic_cop/Makefile" ;; + "cmd/traffic_line/Makefile") CONFIG_FILES="$CONFIG_FILES cmd/traffic_line/Makefile" ;; + "cmd/traffic_top/Makefile") CONFIG_FILES="$CONFIG_FILES cmd/traffic_top/Makefile" ;; + "doc/Doxyfile") CONFIG_FILES="$CONFIG_FILES doc/Doxyfile" ;; + "doc/Makefile") CONFIG_FILES="$CONFIG_FILES doc/Makefile" ;; + "example/Makefile") CONFIG_FILES="$CONFIG_FILES example/Makefile" ;; + "iocore/Makefile") CONFIG_FILES="$CONFIG_FILES iocore/Makefile" ;; + "iocore/aio/Makefile") CONFIG_FILES="$CONFIG_FILES iocore/aio/Makefile" ;; + "iocore/cache/Makefile") CONFIG_FILES="$CONFIG_FILES iocore/cache/Makefile" ;; + "iocore/cluster/Makefile") CONFIG_FILES="$CONFIG_FILES iocore/cluster/Makefile" ;; + "iocore/dns/Makefile") CONFIG_FILES="$CONFIG_FILES iocore/dns/Makefile" ;; + "iocore/eventsystem/Makefile") CONFIG_FILES="$CONFIG_FILES iocore/eventsystem/Makefile" ;; + "iocore/hostdb/Makefile") CONFIG_FILES="$CONFIG_FILES iocore/hostdb/Makefile" ;; + "iocore/net/Makefile") CONFIG_FILES="$CONFIG_FILES iocore/net/Makefile" ;; + "iocore/utils/Makefile") CONFIG_FILES="$CONFIG_FILES iocore/utils/Makefile" ;; + "lib/Makefile") CONFIG_FILES="$CONFIG_FILES lib/Makefile" ;; + "lib/perl/Makefile") CONFIG_FILES="$CONFIG_FILES lib/perl/Makefile" ;; + "lib/perl/lib/Apache/TS.pm") CONFIG_FILES="$CONFIG_FILES lib/perl/lib/Apache/TS.pm" ;; + "lib/records/Makefile") CONFIG_FILES="$CONFIG_FILES lib/records/Makefile" ;; + "lib/ts/Makefile") CONFIG_FILES="$CONFIG_FILES lib/ts/Makefile" ;; + "lib/ts/apidefs.h") 
CONFIG_FILES="$CONFIG_FILES lib/ts/apidefs.h" ;; + "lib/ts/ink_config.h") CONFIG_FILES="$CONFIG_FILES lib/ts/ink_config.h" ;; + "lib/tsconfig/Makefile") CONFIG_FILES="$CONFIG_FILES lib/tsconfig/Makefile" ;; + "lib/wccp/Makefile") CONFIG_FILES="$CONFIG_FILES lib/wccp/Makefile" ;; + "mgmt/Makefile") CONFIG_FILES="$CONFIG_FILES mgmt/Makefile" ;; + "mgmt/api/Makefile") CONFIG_FILES="$CONFIG_FILES mgmt/api/Makefile" ;; + "mgmt/api/include/Makefile") CONFIG_FILES="$CONFIG_FILES mgmt/api/include/Makefile" ;; + "mgmt/cluster/Makefile") CONFIG_FILES="$CONFIG_FILES mgmt/cluster/Makefile" ;; + "mgmt/stats/Makefile") CONFIG_FILES="$CONFIG_FILES mgmt/stats/Makefile" ;; + "mgmt/utils/Makefile") CONFIG_FILES="$CONFIG_FILES mgmt/utils/Makefile" ;; + "mgmt/web2/Makefile") CONFIG_FILES="$CONFIG_FILES mgmt/web2/Makefile" ;; + "plugins/Makefile") CONFIG_FILES="$CONFIG_FILES plugins/Makefile" ;; + "plugins/cacheurl/Makefile") CONFIG_FILES="$CONFIG_FILES plugins/cacheurl/Makefile" ;; + "plugins/conf_remap/Makefile") CONFIG_FILES="$CONFIG_FILES plugins/conf_remap/Makefile" ;; + "plugins/gzip/Makefile") CONFIG_FILES="$CONFIG_FILES plugins/gzip/Makefile" ;; + "plugins/header_rewrite/Makefile") CONFIG_FILES="$CONFIG_FILES plugins/header_rewrite/Makefile" ;; + "plugins/libloader/Makefile") CONFIG_FILES="$CONFIG_FILES plugins/libloader/Makefile" ;; + "plugins/regex_remap/Makefile") CONFIG_FILES="$CONFIG_FILES plugins/regex_remap/Makefile" ;; + "plugins/stats_over_http/Makefile") CONFIG_FILES="$CONFIG_FILES plugins/stats_over_http/Makefile" ;; + "plugins/tcpinfo/Makefile") CONFIG_FILES="$CONFIG_FILES plugins/tcpinfo/Makefile" ;; + "proxy/Makefile") CONFIG_FILES="$CONFIG_FILES proxy/Makefile" ;; + "proxy/api/ts/Makefile") CONFIG_FILES="$CONFIG_FILES proxy/api/ts/Makefile" ;; + "proxy/config/Makefile") CONFIG_FILES="$CONFIG_FILES proxy/config/Makefile" ;; + "proxy/config/body_factory/Makefile") CONFIG_FILES="$CONFIG_FILES proxy/config/body_factory/Makefile" ;; + 
"proxy/config/body_factory/default/Makefile") CONFIG_FILES="$CONFIG_FILES proxy/config/body_factory/default/Makefile" ;; + "proxy/config/records.config.default") CONFIG_FILES="$CONFIG_FILES proxy/config/records.config.default" ;; + "proxy/config/storage.config.default") CONFIG_FILES="$CONFIG_FILES proxy/config/storage.config.default" ;; + "proxy/congest/Makefile") CONFIG_FILES="$CONFIG_FILES proxy/congest/Makefile" ;; + "proxy/hdrs/Makefile") CONFIG_FILES="$CONFIG_FILES proxy/hdrs/Makefile" ;; + "proxy/http/Makefile") CONFIG_FILES="$CONFIG_FILES proxy/http/Makefile" ;; + "proxy/http/remap/Makefile") CONFIG_FILES="$CONFIG_FILES proxy/http/remap/Makefile" ;; + "proxy/logging/Makefile") CONFIG_FILES="$CONFIG_FILES proxy/logging/Makefile" ;; + "proxy/shared/Makefile") CONFIG_FILES="$CONFIG_FILES proxy/shared/Makefile" ;; + "proxy/spdy/Makefile") CONFIG_FILES="$CONFIG_FILES proxy/spdy/Makefile" ;; + "rc/Makefile") CONFIG_FILES="$CONFIG_FILES rc/Makefile" ;; + "rc/trafficserver") CONFIG_FILES="$CONFIG_FILES rc/trafficserver" ;; + "rc/trafficserver.conf") CONFIG_FILES="$CONFIG_FILES rc/trafficserver.conf" ;; + "rc/trafficserver.service") CONFIG_FILES="$CONFIG_FILES rc/trafficserver.service" ;; + "rc/trafficserver.xml") CONFIG_FILES="$CONFIG_FILES rc/trafficserver.xml" ;; + "tools/Makefile") CONFIG_FILES="$CONFIG_FILES tools/Makefile" ;; + "tools/tsxs") CONFIG_FILES="$CONFIG_FILES tools/tsxs" ;; + "plugins/experimental/Makefile") CONFIG_FILES="$CONFIG_FILES plugins/experimental/Makefile" ;; + "plugins/experimental/authproxy/Makefile") CONFIG_FILES="$CONFIG_FILES plugins/experimental/authproxy/Makefile" ;; + "plugins/experimental/background_fetch/Makefile") CONFIG_FILES="$CONFIG_FILES plugins/experimental/background_fetch/Makefile" ;; + "plugins/experimental/balancer/Makefile") CONFIG_FILES="$CONFIG_FILES plugins/experimental/balancer/Makefile" ;; + "plugins/experimental/buffer_upload/Makefile") CONFIG_FILES="$CONFIG_FILES plugins/experimental/buffer_upload/Makefile" ;; + 
"plugins/experimental/channel_stats/Makefile") CONFIG_FILES="$CONFIG_FILES plugins/experimental/channel_stats/Makefile" ;; + "plugins/experimental/collapsed_connection/Makefile") CONFIG_FILES="$CONFIG_FILES plugins/experimental/collapsed_connection/Makefile" ;; + "plugins/experimental/custom_redirect/Makefile") CONFIG_FILES="$CONFIG_FILES plugins/experimental/custom_redirect/Makefile" ;; + "plugins/experimental/escalate/Makefile") CONFIG_FILES="$CONFIG_FILES plugins/experimental/escalate/Makefile" ;; + "plugins/experimental/esi/Makefile") CONFIG_FILES="$CONFIG_FILES plugins/experimental/esi/Makefile" ;; + "plugins/experimental/geoip_acl/Makefile") CONFIG_FILES="$CONFIG_FILES plugins/experimental/geoip_acl/Makefile" ;; + "plugins/experimental/healthchecks/Makefile") CONFIG_FILES="$CONFIG_FILES plugins/experimental/healthchecks/Makefile" ;; + "plugins/experimental/hipes/Makefile") CONFIG_FILES="$CONFIG_FILES plugins/experimental/hipes/Makefile" ;; + "plugins/experimental/metalink/Makefile") CONFIG_FILES="$CONFIG_FILES plugins/experimental/metalink/Makefile" ;; + "plugins/experimental/remap_stats/Makefile") CONFIG_FILES="$CONFIG_FILES plugins/experimental/remap_stats/Makefile" ;; + "plugins/experimental/regex_revalidate/Makefile") CONFIG_FILES="$CONFIG_FILES plugins/experimental/regex_revalidate/Makefile" ;; + "plugins/experimental/s3_auth/Makefile") CONFIG_FILES="$CONFIG_FILES plugins/experimental/s3_auth/Makefile" ;; + "plugins/experimental/stale_while_revalidate/Makefile") CONFIG_FILES="$CONFIG_FILES plugins/experimental/stale_while_revalidate/Makefile" ;; + "plugins/experimental/ts_lua/Makefile") CONFIG_FILES="$CONFIG_FILES plugins/experimental/ts_lua/Makefile" ;; + "plugins/experimental/url_sig/Makefile") CONFIG_FILES="$CONFIG_FILES plugins/experimental/url_sig/Makefile" ;; + "plugins/experimental/xdebug/Makefile") CONFIG_FILES="$CONFIG_FILES plugins/experimental/xdebug/Makefile" ;; + "lib/atscppapi/Makefile") CONFIG_FILES="$CONFIG_FILES lib/atscppapi/Makefile" 
;; + "lib/atscppapi/examples/Makefile") CONFIG_FILES="$CONFIG_FILES lib/atscppapi/examples/Makefile" ;; + "lib/atscppapi/examples/async_http_fetch/Makefile") CONFIG_FILES="$CONFIG_FILES lib/atscppapi/examples/async_http_fetch/Makefile" ;; + "lib/atscppapi/examples/async_timer/Makefile") CONFIG_FILES="$CONFIG_FILES lib/atscppapi/examples/async_timer/Makefile" ;; + "lib/atscppapi/examples/clientredirect/Makefile") CONFIG_FILES="$CONFIG_FILES lib/atscppapi/examples/clientredirect/Makefile" ;; + "lib/atscppapi/examples/clientrequest/Makefile") CONFIG_FILES="$CONFIG_FILES lib/atscppapi/examples/clientrequest/Makefile" ;; + "lib/atscppapi/examples/customresponse/Makefile") CONFIG_FILES="$CONFIG_FILES lib/atscppapi/examples/customresponse/Makefile" ;; + "lib/atscppapi/examples/boom/Makefile") CONFIG_FILES="$CONFIG_FILES lib/atscppapi/examples/boom/Makefile" ;; + "lib/atscppapi/examples/globalhook/Makefile") CONFIG_FILES="$CONFIG_FILES lib/atscppapi/examples/globalhook/Makefile" ;; + "lib/atscppapi/examples/gzip_transformation/Makefile") CONFIG_FILES="$CONFIG_FILES lib/atscppapi/examples/gzip_transformation/Makefile" ;; + "lib/atscppapi/examples/helloworld/Makefile") CONFIG_FILES="$CONFIG_FILES lib/atscppapi/examples/helloworld/Makefile" ;; + "lib/atscppapi/examples/intercept/Makefile") CONFIG_FILES="$CONFIG_FILES lib/atscppapi/examples/intercept/Makefile" ;; + "lib/atscppapi/examples/internal_transaction_handling/Makefile") CONFIG_FILES="$CONFIG_FILES lib/atscppapi/examples/internal_transaction_handling/Makefile" ;; + "lib/atscppapi/examples/logger_example/Makefile") CONFIG_FILES="$CONFIG_FILES lib/atscppapi/examples/logger_example/Makefile" ;; + "lib/atscppapi/examples/multiple_transaction_hooks/Makefile") CONFIG_FILES="$CONFIG_FILES lib/atscppapi/examples/multiple_transaction_hooks/Makefile" ;; + "lib/atscppapi/examples/null_transformation_plugin/Makefile") CONFIG_FILES="$CONFIG_FILES lib/atscppapi/examples/null_transformation_plugin/Makefile" ;; + 
"lib/atscppapi/examples/post_buffer/Makefile") CONFIG_FILES="$CONFIG_FILES lib/atscppapi/examples/post_buffer/Makefile" ;; + "lib/atscppapi/examples/remap_plugin/Makefile") CONFIG_FILES="$CONFIG_FILES lib/atscppapi/examples/remap_plugin/Makefile" ;; + "lib/atscppapi/examples/serverresponse/Makefile") CONFIG_FILES="$CONFIG_FILES lib/atscppapi/examples/serverresponse/Makefile" ;; + "lib/atscppapi/examples/stat_example/Makefile") CONFIG_FILES="$CONFIG_FILES lib/atscppapi/examples/stat_example/Makefile" ;; + "lib/atscppapi/examples/timeout_example/Makefile") CONFIG_FILES="$CONFIG_FILES lib/atscppapi/examples/timeout_example/Makefile" ;; + "lib/atscppapi/examples/transactionhook/Makefile") CONFIG_FILES="$CONFIG_FILES lib/atscppapi/examples/transactionhook/Makefile" ;; + "lib/atscppapi/src/Makefile") CONFIG_FILES="$CONFIG_FILES lib/atscppapi/src/Makefile" ;; + + *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;; + esac +done + + +# If the user did not use the arguments to specify the items to instantiate, +# then the envvar interface is used. Set only those that are not. +# We use the long form for the default assignment because of an extremely +# bizarre bug on SunOS 4.1.3. +if $ac_need_defaults; then + test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files + test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers + test "${CONFIG_COMMANDS+set}" = set || CONFIG_COMMANDS=$config_commands +fi + +# Have a temporary directory for convenience. Make it in the build tree +# simply because there is no reason against having it here, and in addition, +# creating and moving files from /tmp can sometimes cause problems. +# Hook for its removal unless debugging. +# Note that there is a small window in which the directory will not be cleaned: +# after its creation but before its name has been assigned to `$tmp'. +$debug || +{ + tmp= ac_tmp= + trap 'exit_status=$? + : "${ac_tmp:=$tmp}" + { test ! 
-d "$ac_tmp" || rm -fr "$ac_tmp"; } && exit $exit_status +' 0 + trap 'as_fn_exit 1' 1 2 13 15 +} +# Create a (secure) tmp directory for tmp files. + +{ + tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` && + test -d "$tmp" +} || +{ + tmp=./conf$$-$RANDOM + (umask 077 && mkdir "$tmp") +} || as_fn_error $? "cannot create a temporary directory in ." "$LINENO" 5 +ac_tmp=$tmp + +# Set up the scripts for CONFIG_FILES section. +# No need to generate them if there are no CONFIG_FILES. +# This happens for instance with `./config.status config.h'. +if test -n "$CONFIG_FILES"; then + + +ac_cr=`echo X | tr X '\015'` +# On cygwin, bash can eat \r inside `` if the user requested igncr. +# But we know of no other shell where ac_cr would be empty at this +# point, so we can use a bashism as a fallback. +if test "x$ac_cr" = x; then + eval ac_cr=\$\'\\r\' +fi +ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' /dev/null` +if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then + ac_cs_awk_cr='\\r' +else + ac_cs_awk_cr=$ac_cr +fi + +echo 'BEGIN {' >"$ac_tmp/subs1.awk" && +_ACEOF + + +{ + echo "cat >conf$$subs.awk <<_ACEOF" && + echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' && + echo "_ACEOF" +} >conf$$subs.sh || + as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 +ac_delim_num=`echo "$ac_subst_vars" | grep -c '^'` +ac_delim='%!_!# ' +for ac_last_try in false false false false false :; do + . ./conf$$subs.sh || + as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 + + ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X` + if test $ac_delim_n = $ac_delim_num; then + break + elif $ac_last_try; then + as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 + else + ac_delim="$ac_delim!$ac_delim _$ac_delim!! 
" + fi +done +rm -f conf$$subs.sh + +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +cat >>"\$ac_tmp/subs1.awk" <<\\_ACAWK && +_ACEOF +sed -n ' +h +s/^/S["/; s/!.*/"]=/ +p +g +s/^[^!]*!// +:repl +t repl +s/'"$ac_delim"'$// +t delim +:nl +h +s/\(.\{148\}\)..*/\1/ +t more1 +s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/ +p +n +b repl +:more1 +s/["\\]/\\&/g; s/^/"/; s/$/"\\/ +p +g +s/.\{148\}// +t nl +:delim +h +s/\(.\{148\}\)..*/\1/ +t more2 +s/["\\]/\\&/g; s/^/"/; s/$/"/ +p +b +:more2 +s/["\\]/\\&/g; s/^/"/; s/$/"\\/ +p +g +s/.\{148\}// +t delim +' >$CONFIG_STATUS || ac_write_fail=1 +rm -f conf$$subs.awk +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +_ACAWK +cat >>"\$ac_tmp/subs1.awk" <<_ACAWK && + for (key in S) S_is_set[key] = 1 + FS = "" + +} +{ + line = $ 0 + nfields = split(line, field, "@") + substed = 0 + len = length(field[1]) + for (i = 2; i < nfields; i++) { + key = field[i] + keylen = length(key) + if (S_is_set[key]) { + value = S[key] + line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3) + len += length(value) + length(field[++i]) + substed = 1 + } else + len += 1 + keylen + } + + print line +} + +_ACAWK +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then + sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g" +else + cat +fi < "$ac_tmp/subs1.awk" > "$ac_tmp/subs.awk" \ + || as_fn_error $? "could not setup config files machinery" "$LINENO" 5 +_ACEOF + +# VPATH may cause trouble with some makes, so we remove sole $(srcdir), +# ${srcdir} and @srcdir@ entries from VPATH if srcdir is ".", strip leading and +# trailing colons and then remove the whole line if VPATH becomes empty +# (actually we leave an empty line to preserve line numbers). 
+if test "x$srcdir" = x.; then + ac_vpsub='/^[ ]*VPATH[ ]*=[ ]*/{ +h +s/// +s/^/:/ +s/[ ]*$/:/ +s/:\$(srcdir):/:/g +s/:\${srcdir}:/:/g +s/:@srcdir@:/:/g +s/^:*// +s/:*$// +x +s/\(=[ ]*\).*/\1/ +G +s/\n// +s/^[^=]*=[ ]*$// +}' +fi + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +fi # test -n "$CONFIG_FILES" + +# Set up the scripts for CONFIG_HEADERS section. +# No need to generate them if there are no CONFIG_HEADERS. +# This happens for instance with `./config.status Makefile'. +if test -n "$CONFIG_HEADERS"; then +cat >"$ac_tmp/defines.awk" <<\_ACAWK || +BEGIN { +_ACEOF + +# Transform confdefs.h into an awk script `defines.awk', embedded as +# here-document in config.status, that substitutes the proper values into +# config.h.in to produce config.h. + +# Create a delimiter string that does not exist in confdefs.h, to ease +# handling of long lines. +ac_delim='%!_!# ' +for ac_last_try in false false :; do + ac_tt=`sed -n "/$ac_delim/p" confdefs.h` + if test -z "$ac_tt"; then + break + elif $ac_last_try; then + as_fn_error $? "could not make $CONFIG_HEADERS" "$LINENO" 5 + else + ac_delim="$ac_delim!$ac_delim _$ac_delim!! " + fi +done + +# For the awk script, D is an array of macro values keyed by name, +# likewise P contains macro parameters if any. Preserve backslash +# newline sequences. 
+ +ac_word_re=[_$as_cr_Letters][_$as_cr_alnum]* +sed -n ' +s/.\{148\}/&'"$ac_delim"'/g +t rset +:rset +s/^[ ]*#[ ]*define[ ][ ]*/ / +t def +d +:def +s/\\$// +t bsnl +s/["\\]/\\&/g +s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ +D["\1"]=" \3"/p +s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2"/p +d +:bsnl +s/["\\]/\\&/g +s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ +D["\1"]=" \3\\\\\\n"\\/p +t cont +s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2\\\\\\n"\\/p +t cont +d +:cont +n +s/.\{148\}/&'"$ac_delim"'/g +t clear +:clear +s/\\$// +t bsnlc +s/["\\]/\\&/g; s/^/"/; s/$/"/p +d +:bsnlc +s/["\\]/\\&/g; s/^/"/; s/$/\\\\\\n"\\/p +b cont +' >$CONFIG_STATUS || ac_write_fail=1 + +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 + for (key in D) D_is_set[key] = 1 + FS = "" +} +/^[\t ]*#[\t ]*(define|undef)[\t ]+$ac_word_re([\t (]|\$)/ { + line = \$ 0 + split(line, arg, " ") + if (arg[1] == "#") { + defundef = arg[2] + mac1 = arg[3] + } else { + defundef = substr(arg[1], 2) + mac1 = arg[2] + } + split(mac1, mac2, "(") #) + macro = mac2[1] + prefix = substr(line, 1, index(line, defundef) - 1) + if (D_is_set[macro]) { + # Preserve the white space surrounding the "#". + print prefix "define", macro P[macro] D[macro] + next + } else { + # Replace #undef with comments. This is necessary, for example, + # in the case of _POSIX_SOURCE, which is predefined and required + # on some systems where configure will not decide to define it. + if (defundef == "undef") { + print "/*", prefix defundef, macro, "*/" + next + } + } +} +{ print } +_ACAWK +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 + as_fn_error $? "could not setup config headers machinery" "$LINENO" 5 +fi # test -n "$CONFIG_HEADERS" + + +eval set X " :F $CONFIG_FILES :H $CONFIG_HEADERS :C $CONFIG_COMMANDS" +shift +for ac_tag +do + case $ac_tag in + :[FHLC]) ac_mode=$ac_tag; continue;; + esac + case $ac_mode$ac_tag in + :[FHL]*:*);; + :L* | :C*:*) as_fn_error $? 
"invalid tag \`$ac_tag'" "$LINENO" 5;; + :[FH]-) ac_tag=-:-;; + :[FH]*) ac_tag=$ac_tag:$ac_tag.in;; + esac + ac_save_IFS=$IFS + IFS=: + set x $ac_tag + IFS=$ac_save_IFS + shift + ac_file=$1 + shift + + case $ac_mode in + :L) ac_source=$1;; + :[FH]) + ac_file_inputs= + for ac_f + do + case $ac_f in + -) ac_f="$ac_tmp/stdin";; + *) # Look for the file first in the build tree, then in the source tree + # (if the path is not absolute). The absolute path cannot be DOS-style, + # because $ac_f cannot contain `:'. + test -f "$ac_f" || + case $ac_f in + [\\/$]*) false;; + *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";; + esac || + as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5;; + esac + case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac + as_fn_append ac_file_inputs " '$ac_f'" + done + + # Let's still pretend it is `configure' which instantiates (i.e., don't + # use $as_me), people would be surprised to read: + # /* config.h. Generated by config.status. */ + configure_input='Generated from '` + $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g' + `' by configure.' + if test x"$ac_file" != x-; then + configure_input="$ac_file. $configure_input" + { $as_echo "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5 +$as_echo "$as_me: creating $ac_file" >&6;} + fi + # Neutralize special characters interpreted by sed in replacement strings. + case $configure_input in #( + *\&* | *\|* | *\\* ) + ac_sed_conf_input=`$as_echo "$configure_input" | + sed 's/[\\\\&|]/\\\\&/g'`;; #( + *) ac_sed_conf_input=$configure_input;; + esac + + case $ac_tag in + *:-:* | *:-) cat >"$ac_tmp/stdin" \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;; + esac + ;; + esac + + ac_dir=`$as_dirname -- "$ac_file" || +$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$ac_file" : 'X\(//\)[^/]' \| \ + X"$ac_file" : 'X\(//\)$' \| \ + X"$ac_file" : 'X\(/\)' \| . 
2>/dev/null || +$as_echo X"$ac_file" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + as_dir="$ac_dir"; as_fn_mkdir_p + ac_builddir=. + +case "$ac_dir" in +.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; +*) + ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` + # A ".." for each directory in $ac_dir_suffix. + ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` + case $ac_top_builddir_sub in + "") ac_top_builddir_sub=. ac_top_build_prefix= ;; + *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; + esac ;; +esac +ac_abs_top_builddir=$ac_pwd +ac_abs_builddir=$ac_pwd$ac_dir_suffix +# for backward compatibility: +ac_top_builddir=$ac_top_build_prefix + +case $srcdir in + .) # We are building in place. + ac_srcdir=. + ac_top_srcdir=$ac_top_builddir_sub + ac_abs_top_srcdir=$ac_pwd ;; + [\\/]* | ?:[\\/]* ) # Absolute name. + ac_srcdir=$srcdir$ac_dir_suffix; + ac_top_srcdir=$srcdir + ac_abs_top_srcdir=$srcdir ;; + *) # Relative name. + ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix + ac_top_srcdir=$ac_top_build_prefix$srcdir + ac_abs_top_srcdir=$ac_pwd/$srcdir ;; +esac +ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix + + + case $ac_mode in + :F) + # + # CONFIG_FILE + # + + case $INSTALL in + [\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;; + *) ac_INSTALL=$ac_top_build_prefix$INSTALL ;; + esac + ac_MKDIR_P=$MKDIR_P + case $MKDIR_P in + [\\/$]* | ?:[\\/]* ) ;; + */*) ac_MKDIR_P=$ac_top_build_prefix$MKDIR_P ;; + esac +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +# If the template does not know about datarootdir, expand it. +# FIXME: This hack should be removed a few years after 2.60. 
+ac_datarootdir_hack=; ac_datarootdir_seen= +ac_sed_dataroot=' +/datarootdir/ { + p + q +} +/@datadir@/p +/@docdir@/p +/@infodir@/p +/@localedir@/p +/@mandir@/p' +case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in +*datarootdir*) ac_datarootdir_seen=yes;; +*@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*) + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5 +$as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;} +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 + ac_datarootdir_hack=' + s&@datadir@&$datadir&g + s&@docdir@&$docdir&g + s&@infodir@&$infodir&g + s&@localedir@&$localedir&g + s&@mandir@&$mandir&g + s&\\\${datarootdir}&$datarootdir&g' ;; +esac +_ACEOF + +# Neutralize VPATH when `$srcdir' = `.'. +# Shell code in configure.ac might set extrasub. +# FIXME: do we really want to maintain this feature? +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +ac_sed_extra="$ac_vpsub +$extrasub +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +:t +/@[a-zA-Z_][a-zA-Z_0-9]*@/!b +s|@configure_input@|$ac_sed_conf_input|;t t +s&@top_builddir@&$ac_top_builddir_sub&;t t +s&@top_build_prefix@&$ac_top_build_prefix&;t t +s&@srcdir@&$ac_srcdir&;t t +s&@abs_srcdir@&$ac_abs_srcdir&;t t +s&@top_srcdir@&$ac_top_srcdir&;t t +s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t +s&@builddir@&$ac_builddir&;t t +s&@abs_builddir@&$ac_abs_builddir&;t t +s&@abs_top_builddir@&$ac_abs_top_builddir&;t t +s&@INSTALL@&$ac_INSTALL&;t t +s&@MKDIR_P@&$ac_MKDIR_P&;t t +$ac_datarootdir_hack +" +eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$ac_tmp/subs.awk" \ + >$ac_tmp/out || as_fn_error $? 
"could not create $ac_file" "$LINENO" 5 + +test -z "$ac_datarootdir_hack$ac_datarootdir_seen" && + { ac_out=`sed -n '/\${datarootdir}/p' "$ac_tmp/out"`; test -n "$ac_out"; } && + { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' \ + "$ac_tmp/out"`; test -z "$ac_out"; } && + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir' +which seems to be undefined. Please make sure it is defined" >&5 +$as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir' +which seems to be undefined. Please make sure it is defined" >&2;} + + rm -f "$ac_tmp/stdin" + case $ac_file in + -) cat "$ac_tmp/out" && rm -f "$ac_tmp/out";; + *) rm -f "$ac_file" && mv "$ac_tmp/out" "$ac_file";; + esac \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 + ;; + :H) + # + # CONFIG_HEADER + # + if test x"$ac_file" != x-; then + { + $as_echo "/* $configure_input */" \ + && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" + } >"$ac_tmp/config.h" \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 + if diff "$ac_file" "$ac_tmp/config.h" >/dev/null 2>&1; then + { $as_echo "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5 +$as_echo "$as_me: $ac_file is unchanged" >&6;} + else + rm -f "$ac_file" + mv "$ac_tmp/config.h" "$ac_file" \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 + fi + else + $as_echo "/* $configure_input */" \ + && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" \ + || as_fn_error $? "could not create -" "$LINENO" 5 + fi +# Compute "$ac_file"'s index in $config_headers. 
+_am_arg="$ac_file" +_am_stamp_count=1 +for _am_header in $config_headers :; do + case $_am_header in + $_am_arg | $_am_arg:* ) + break ;; + * ) + _am_stamp_count=`expr $_am_stamp_count + 1` ;; + esac +done +echo "timestamp for $_am_arg" >`$as_dirname -- "$_am_arg" || +$as_expr X"$_am_arg" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$_am_arg" : 'X\(//\)[^/]' \| \ + X"$_am_arg" : 'X\(//\)$' \| \ + X"$_am_arg" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$_am_arg" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'`/stamp-h$_am_stamp_count + ;; + + :C) { $as_echo "$as_me:${as_lineno-$LINENO}: executing $ac_file commands" >&5 +$as_echo "$as_me: executing $ac_file commands" >&6;} + ;; + esac + + + case $ac_file$ac_mode in + "depfiles":C) test x"$AMDEP_TRUE" != x"" || { + # Older Autoconf quotes --file arguments for eval, but not when files + # are listed without --file. Let's play safe and only enable the eval + # if we detect the quoting. + case $CONFIG_FILES in + *\'*) eval set x "$CONFIG_FILES" ;; + *) set x $CONFIG_FILES ;; + esac + shift + for mf + do + # Strip MF so we end up with the name of the file. + mf=`echo "$mf" | sed -e 's/:.*$//'` + # Check whether this is an Automake generated Makefile or not. + # We used to match only the files named 'Makefile.in', but + # some people rename them; so instead we look at the file content. + # Grep'ing the first line is not enough: some people post-process + # each Makefile.in and add a new line on top of each file to say so. + # Grep'ing the whole file is not good either: AIX grep has a line + # limit of 2048, but all sed's we know have understand at least 4000. 
+ if sed -n 's,^#.*generated by automake.*,X,p' "$mf" | grep X >/dev/null 2>&1; then + dirpart=`$as_dirname -- "$mf" || +$as_expr X"$mf" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$mf" : 'X\(//\)[^/]' \| \ + X"$mf" : 'X\(//\)$' \| \ + X"$mf" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$mf" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + else + continue + fi + # Extract the definition of DEPDIR, am__include, and am__quote + # from the Makefile without running 'make'. + DEPDIR=`sed -n 's/^DEPDIR = //p' < "$mf"` + test -z "$DEPDIR" && continue + am__include=`sed -n 's/^am__include = //p' < "$mf"` + test -z "$am__include" && continue + am__quote=`sed -n 's/^am__quote = //p' < "$mf"` + # Find all dependency output files, they are included files with + # $(DEPDIR) in their names. We invoke sed twice because it is the + # simplest approach to changing $(DEPDIR) to its actual value in the + # expansion. + for file in `sed -n " + s/^$am__include $am__quote\(.*(DEPDIR).*\)$am__quote"'$/\1/p' <"$mf" | \ + sed -e 's/\$(DEPDIR)/'"$DEPDIR"'/g'`; do + # Make sure the directory exists. + test -f "$dirpart/$file" && continue + fdir=`$as_dirname -- "$file" || +$as_expr X"$file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$file" : 'X\(//\)[^/]' \| \ + X"$file" : 'X\(//\)$' \| \ + X"$file" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$file" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + as_dir=$dirpart/$fdir; as_fn_mkdir_p + # echo "creating $dirpart/$file" + echo '# dummy' > "$dirpart/$file" + done + done +} + ;; + "libtool":C) + + # See if we are running on zsh, and set the options which allow our + # commands through without removal of \ escapes. 
+ if test -n "${ZSH_VERSION+set}" ; then + setopt NO_GLOB_SUBST + fi + + cfgfile="${ofile}T" + trap "$RM \"$cfgfile\"; exit 1" 1 2 15 + $RM "$cfgfile" + + cat <<_LT_EOF >> "$cfgfile" +#! $SHELL + +# `$ECHO "$ofile" | sed 's%^.*/%%'` - Provide generalized library-building support services. +# Generated automatically by $as_me ($PACKAGE$TIMESTAMP) $VERSION +# Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`: +# NOTE: Changes made to this file will be lost: look at ltmain.sh. +# +# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, +# 2006, 2007, 2008, 2009, 2010, 2011 Free Software +# Foundation, Inc. +# Written by Gordon Matzigkeit, 1996 +# +# This file is part of GNU Libtool. +# +# GNU Libtool is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License as +# published by the Free Software Foundation; either version 2 of +# the License, or (at your option) any later version. +# +# As a special exception to the GNU General Public License, +# if you distribute this file as part of a program or library that +# is built using GNU Libtool, you may include this file under the +# same distribution terms that you use for the rest of that program. +# +# GNU Libtool is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with GNU Libtool; see the file COPYING. If not, a copy +# can be downloaded from http://www.gnu.org/licenses/gpl.html, or +# obtained by writing to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + + +# The names of the tagged configurations supported by this script. 
+available_tags="CXX " + +# ### BEGIN LIBTOOL CONFIG + +# Whether or not to build static libraries. +build_old_libs=$enable_static + +# Which release of libtool.m4 was used? +macro_version=$macro_version +macro_revision=$macro_revision + +# Whether or not to build shared libraries. +build_libtool_libs=$enable_shared + +# What type of objects to build. +pic_mode=$pic_mode + +# Whether or not to optimize for fast installation. +fast_install=$enable_fast_install + +# Shell to use when invoking shell scripts. +SHELL=$lt_SHELL + +# An echo program that protects backslashes. +ECHO=$lt_ECHO + +# The PATH separator for the build system. +PATH_SEPARATOR=$lt_PATH_SEPARATOR + +# The host system. +host_alias=$host_alias +host=$host +host_os=$host_os + +# The build system. +build_alias=$build_alias +build=$build +build_os=$build_os + +# A sed program that does not truncate output. +SED=$lt_SED + +# Sed that helps us avoid accidentally triggering echo(1) options like -n. +Xsed="\$SED -e 1s/^X//" + +# A grep program that handles long lines. +GREP=$lt_GREP + +# An ERE matcher. +EGREP=$lt_EGREP + +# A literal string matcher. +FGREP=$lt_FGREP + +# A BSD- or MS-compatible name lister. +NM=$lt_NM + +# Whether we need soft or hard links. +LN_S=$lt_LN_S + +# What is the maximum length of a command? +max_cmd_len=$max_cmd_len + +# Object file suffix (normally "o"). +objext=$ac_objext + +# Executable file suffix (normally ""). +exeext=$exeext + +# whether the shell understands "unset". +lt_unset=$lt_unset + +# turn spaces into newlines. +SP2NL=$lt_lt_SP2NL + +# turn newlines into spaces. +NL2SP=$lt_lt_NL2SP + +# convert \$build file names to \$host format. +to_host_file_cmd=$lt_cv_to_host_file_cmd + +# convert \$build files to toolchain format. +to_tool_file_cmd=$lt_cv_to_tool_file_cmd + +# An object symbol dumper. +OBJDUMP=$lt_OBJDUMP + +# Method to check whether dependent libraries are shared objects. 
+deplibs_check_method=$lt_deplibs_check_method + +# Command to use when deplibs_check_method = "file_magic". +file_magic_cmd=$lt_file_magic_cmd + +# How to find potential files when deplibs_check_method = "file_magic". +file_magic_glob=$lt_file_magic_glob + +# Find potential files using nocaseglob when deplibs_check_method = "file_magic". +want_nocaseglob=$lt_want_nocaseglob + +# DLL creation program. +DLLTOOL=$lt_DLLTOOL + +# Command to associate shared and link libraries. +sharedlib_from_linklib_cmd=$lt_sharedlib_from_linklib_cmd + +# The archiver. +AR=$lt_AR + +# Flags to create an archive. +AR_FLAGS=$lt_AR_FLAGS + +# How to feed a file listing to the archiver. +archiver_list_spec=$lt_archiver_list_spec + +# A symbol stripping program. +STRIP=$lt_STRIP + +# Commands used to install an old-style archive. +RANLIB=$lt_RANLIB +old_postinstall_cmds=$lt_old_postinstall_cmds +old_postuninstall_cmds=$lt_old_postuninstall_cmds + +# Whether to use a lock for old archive extraction. +lock_old_archive_extraction=$lock_old_archive_extraction + +# A C compiler. +LTCC=$lt_CC + +# LTCC compiler flags. +LTCFLAGS=$lt_CFLAGS + +# Take the output of nm and produce a listing of raw symbols and C names. +global_symbol_pipe=$lt_lt_cv_sys_global_symbol_pipe + +# Transform the output of nm in a proper C declaration. +global_symbol_to_cdecl=$lt_lt_cv_sys_global_symbol_to_cdecl + +# Transform the output of nm in a C name address pair. +global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address + +# Transform the output of nm in a C name address pair when lib prefix is needed. +global_symbol_to_c_name_address_lib_prefix=$lt_lt_cv_sys_global_symbol_to_c_name_address_lib_prefix + +# Specify filename containing input files for \$NM. +nm_file_list_spec=$lt_nm_file_list_spec + +# The root where to search for dependent libraries,and in which our libraries should be installed. +lt_sysroot=$lt_sysroot + +# The name of the directory that contains temporary libtool files. 
+objdir=$objdir + +# Used to examine libraries when file_magic_cmd begins with "file". +MAGIC_CMD=$MAGIC_CMD + +# Must we lock files when doing compilation? +need_locks=$lt_need_locks + +# Manifest tool. +MANIFEST_TOOL=$lt_MANIFEST_TOOL + +# Tool to manipulate archived DWARF debug symbol files on Mac OS X. +DSYMUTIL=$lt_DSYMUTIL + +# Tool to change global to local symbols on Mac OS X. +NMEDIT=$lt_NMEDIT + +# Tool to manipulate fat objects and archives on Mac OS X. +LIPO=$lt_LIPO + +# ldd/readelf like tool for Mach-O binaries on Mac OS X. +OTOOL=$lt_OTOOL + +# ldd/readelf like tool for 64 bit Mach-O binaries on Mac OS X 10.4. +OTOOL64=$lt_OTOOL64 + +# Old archive suffix (normally "a"). +libext=$libext + +# Shared library suffix (normally ".so"). +shrext_cmds=$lt_shrext_cmds + +# The commands to extract the exported symbol list from a shared archive. +extract_expsyms_cmds=$lt_extract_expsyms_cmds + +# Variables whose values should be saved in libtool wrapper scripts and +# restored at link time. +variables_saved_for_relink=$lt_variables_saved_for_relink + +# Do we need the "lib" prefix for modules? +need_lib_prefix=$need_lib_prefix + +# Do we need a version for libraries? +need_version=$need_version + +# Library versioning type. +version_type=$version_type + +# Shared library runtime path variable. +runpath_var=$runpath_var + +# Shared library path variable. +shlibpath_var=$shlibpath_var + +# Is shlibpath searched before the hard-coded library search path? +shlibpath_overrides_runpath=$shlibpath_overrides_runpath + +# Format of library name prefix. +libname_spec=$lt_libname_spec + +# List of archive names. First name is the real one, the rest are links. +# The last name is the one that the linker finds with -lNAME +library_names_spec=$lt_library_names_spec + +# The coded name of the library, if different from the real name. +soname_spec=$lt_soname_spec + +# Permission mode override for installation of shared libraries. 
+install_override_mode=$lt_install_override_mode + +# Command to use after installation of a shared archive. +postinstall_cmds=$lt_postinstall_cmds + +# Command to use after uninstallation of a shared archive. +postuninstall_cmds=$lt_postuninstall_cmds + +# Commands used to finish a libtool library installation in a directory. +finish_cmds=$lt_finish_cmds + +# As "finish_cmds", except a single script fragment to be evaled but +# not shown. +finish_eval=$lt_finish_eval + +# Whether we should hardcode library paths into libraries. +hardcode_into_libs=$hardcode_into_libs + +# Compile-time system search path for libraries. +sys_lib_search_path_spec=$lt_sys_lib_search_path_spec + +# Run-time system search path for libraries. +sys_lib_dlsearch_path_spec=$lt_sys_lib_dlsearch_path_spec + +# Whether dlopen is supported. +dlopen_support=$enable_dlopen + +# Whether dlopen of programs is supported. +dlopen_self=$enable_dlopen_self + +# Whether dlopen of statically linked programs is supported. +dlopen_self_static=$enable_dlopen_self_static + +# Commands to strip libraries. +old_striplib=$lt_old_striplib +striplib=$lt_striplib + + +# The linker used to build libraries. +LD=$lt_LD + +# How to create reloadable object files. +reload_flag=$lt_reload_flag +reload_cmds=$lt_reload_cmds + +# Commands used to build an old-style archive. +old_archive_cmds=$lt_old_archive_cmds + +# A language specific compiler. +CC=$lt_compiler + +# Is the compiler the GNU compiler? +with_gcc=$GCC + +# Compiler flag to turn off builtin functions. +no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag + +# Additional compiler flags for building library objects. +pic_flag=$lt_lt_prog_compiler_pic + +# How to pass a linker flag through the compiler. +wl=$lt_lt_prog_compiler_wl + +# Compiler flag to prevent dynamic linking. +link_static_flag=$lt_lt_prog_compiler_static + +# Does compiler simultaneously support -c and -o options? 
+compiler_c_o=$lt_lt_cv_prog_compiler_c_o + +# Whether or not to add -lc for building shared libraries. +build_libtool_need_lc=$archive_cmds_need_lc + +# Whether or not to disallow shared libs when runtime libs are static. +allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes + +# Compiler flag to allow reflexive dlopens. +export_dynamic_flag_spec=$lt_export_dynamic_flag_spec + +# Compiler flag to generate shared objects directly from archives. +whole_archive_flag_spec=$lt_whole_archive_flag_spec + +# Whether the compiler copes with passing no objects directly. +compiler_needs_object=$lt_compiler_needs_object + +# Create an old-style archive from a shared archive. +old_archive_from_new_cmds=$lt_old_archive_from_new_cmds + +# Create a temporary old-style archive to link instead of a shared archive. +old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds + +# Commands used to build a shared archive. +archive_cmds=$lt_archive_cmds +archive_expsym_cmds=$lt_archive_expsym_cmds + +# Commands used to build a loadable module if different from building +# a shared archive. +module_cmds=$lt_module_cmds +module_expsym_cmds=$lt_module_expsym_cmds + +# Whether we are building with GNU ld or not. +with_gnu_ld=$lt_with_gnu_ld + +# Flag that allows shared libraries with undefined symbols to be built. +allow_undefined_flag=$lt_allow_undefined_flag + +# Flag that enforces no undefined symbols. +no_undefined_flag=$lt_no_undefined_flag + +# Flag to hardcode \$libdir into a binary during linking. +# This must work even if \$libdir does not exist +hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec + +# Whether we need a single "-rpath" flag with a separated argument. +hardcode_libdir_separator=$lt_hardcode_libdir_separator + +# Set to "yes" if using DIR/libNAME\${shared_ext} during linking hardcodes +# DIR into the resulting binary. 
+hardcode_direct=$hardcode_direct + +# Set to "yes" if using DIR/libNAME\${shared_ext} during linking hardcodes +# DIR into the resulting binary and the resulting library dependency is +# "absolute",i.e impossible to change by setting \${shlibpath_var} if the +# library is relocated. +hardcode_direct_absolute=$hardcode_direct_absolute + +# Set to "yes" if using the -LDIR flag during linking hardcodes DIR +# into the resulting binary. +hardcode_minus_L=$hardcode_minus_L + +# Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR +# into the resulting binary. +hardcode_shlibpath_var=$hardcode_shlibpath_var + +# Set to "yes" if building a shared library automatically hardcodes DIR +# into the library and all subsequent libraries and executables linked +# against it. +hardcode_automatic=$hardcode_automatic + +# Set to yes if linker adds runtime paths of dependent libraries +# to runtime path list. +inherit_rpath=$inherit_rpath + +# Whether libtool must link a program against all its dependency libraries. +link_all_deplibs=$link_all_deplibs + +# Set to "yes" if exported symbols are required. +always_export_symbols=$always_export_symbols + +# The commands to list exported symbols. +export_symbols_cmds=$lt_export_symbols_cmds + +# Symbols that should not be listed in the preloaded symbols. +exclude_expsyms=$lt_exclude_expsyms + +# Symbols that must always be exported. +include_expsyms=$lt_include_expsyms + +# Commands necessary for linking programs (against libraries) with templates. +prelink_cmds=$lt_prelink_cmds + +# Commands necessary for finishing linking programs. +postlink_cmds=$lt_postlink_cmds + +# Specify filename containing input files. +file_list_spec=$lt_file_list_spec + +# How to hardcode a shared library path into an executable. +hardcode_action=$hardcode_action + +# The directories searched by this compiler when creating a shared library. 
+compiler_lib_search_dirs=$lt_compiler_lib_search_dirs + +# Dependencies to place before and after the objects being linked to +# create a shared library. +predep_objects=$lt_predep_objects +postdep_objects=$lt_postdep_objects +predeps=$lt_predeps +postdeps=$lt_postdeps + +# The library search path used internally by the compiler when linking +# a shared library. +compiler_lib_search_path=$lt_compiler_lib_search_path + +# ### END LIBTOOL CONFIG + +_LT_EOF + + case $host_os in + aix3*) + cat <<\_LT_EOF >> "$cfgfile" +# AIX sometimes has problems with the GCC collect2 program. For some +# reason, if we set the COLLECT_NAMES environment variable, the problems +# vanish in a puff of smoke. +if test "X${COLLECT_NAMES+set}" != Xset; then + COLLECT_NAMES= + export COLLECT_NAMES +fi +_LT_EOF + ;; + esac + + +ltmain="$ac_aux_dir/ltmain.sh" + + + # We use sed instead of cat because bash on DJGPP gets confused if + # if finds mixed CR/LF and LF-only lines. Since sed operates in + # text mode, it properly converts lines to CR/LF. This bash problem + # is reportedly fixed, but why not run on old versions too? + sed '$q' "$ltmain" >> "$cfgfile" \ + || (rm -f "$cfgfile"; exit 1) + + if test x"$xsi_shell" = xyes; then + sed -e '/^func_dirname ()$/,/^} # func_dirname /c\ +func_dirname ()\ +{\ +\ case ${1} in\ +\ */*) func_dirname_result="${1%/*}${2}" ;;\ +\ * ) func_dirname_result="${3}" ;;\ +\ esac\ +} # Extended-shell func_dirname implementation' "$cfgfile" > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") +test 0 -eq $? || _lt_function_replace_fail=: + + + sed -e '/^func_basename ()$/,/^} # func_basename /c\ +func_basename ()\ +{\ +\ func_basename_result="${1##*/}"\ +} # Extended-shell func_basename implementation' "$cfgfile" > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") +test 0 -eq $? 
|| _lt_function_replace_fail=: + + + sed -e '/^func_dirname_and_basename ()$/,/^} # func_dirname_and_basename /c\ +func_dirname_and_basename ()\ +{\ +\ case ${1} in\ +\ */*) func_dirname_result="${1%/*}${2}" ;;\ +\ * ) func_dirname_result="${3}" ;;\ +\ esac\ +\ func_basename_result="${1##*/}"\ +} # Extended-shell func_dirname_and_basename implementation' "$cfgfile" > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") +test 0 -eq $? || _lt_function_replace_fail=: + + + sed -e '/^func_stripname ()$/,/^} # func_stripname /c\ +func_stripname ()\ +{\ +\ # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are\ +\ # positional parameters, so assign one to ordinary parameter first.\ +\ func_stripname_result=${3}\ +\ func_stripname_result=${func_stripname_result#"${1}"}\ +\ func_stripname_result=${func_stripname_result%"${2}"}\ +} # Extended-shell func_stripname implementation' "$cfgfile" > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") +test 0 -eq $? || _lt_function_replace_fail=: + + + sed -e '/^func_split_long_opt ()$/,/^} # func_split_long_opt /c\ +func_split_long_opt ()\ +{\ +\ func_split_long_opt_name=${1%%=*}\ +\ func_split_long_opt_arg=${1#*=}\ +} # Extended-shell func_split_long_opt implementation' "$cfgfile" > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") +test 0 -eq $? 
|| _lt_function_replace_fail=: + + + sed -e '/^func_split_short_opt ()$/,/^} # func_split_short_opt /c\ +func_split_short_opt ()\ +{\ +\ func_split_short_opt_arg=${1#??}\ +\ func_split_short_opt_name=${1%"$func_split_short_opt_arg"}\ +} # Extended-shell func_split_short_opt implementation' "$cfgfile" > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") +test 0 -eq $? || _lt_function_replace_fail=: + + + sed -e '/^func_lo2o ()$/,/^} # func_lo2o /c\ +func_lo2o ()\ +{\ +\ case ${1} in\ +\ *.lo) func_lo2o_result=${1%.lo}.${objext} ;;\ +\ *) func_lo2o_result=${1} ;;\ +\ esac\ +} # Extended-shell func_lo2o implementation' "$cfgfile" > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") +test 0 -eq $? || _lt_function_replace_fail=: + + + sed -e '/^func_xform ()$/,/^} # func_xform /c\ +func_xform ()\ +{\ + func_xform_result=${1%.*}.lo\ +} # Extended-shell func_xform implementation' "$cfgfile" > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") +test 0 -eq $? || _lt_function_replace_fail=: + + + sed -e '/^func_arith ()$/,/^} # func_arith /c\ +func_arith ()\ +{\ + func_arith_result=$(( $* ))\ +} # Extended-shell func_arith implementation' "$cfgfile" > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") +test 0 -eq $? || _lt_function_replace_fail=: + + + sed -e '/^func_len ()$/,/^} # func_len /c\ +func_len ()\ +{\ + func_len_result=${#1}\ +} # Extended-shell func_len implementation' "$cfgfile" > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") +test 0 -eq $? 
|| _lt_function_replace_fail=: + +fi + +if test x"$lt_shell_append" = xyes; then + sed -e '/^func_append ()$/,/^} # func_append /c\ +func_append ()\ +{\ + eval "${1}+=\\${2}"\ +} # Extended-shell func_append implementation' "$cfgfile" > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") +test 0 -eq $? || _lt_function_replace_fail=: + + + sed -e '/^func_append_quoted ()$/,/^} # func_append_quoted /c\ +func_append_quoted ()\ +{\ +\ func_quote_for_eval "${2}"\ +\ eval "${1}+=\\\\ \\$func_quote_for_eval_result"\ +} # Extended-shell func_append_quoted implementation' "$cfgfile" > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") +test 0 -eq $? || _lt_function_replace_fail=: + + + # Save a `func_append' function call where possible by direct use of '+=' + sed -e 's%func_append \([a-zA-Z_]\{1,\}\) "%\1+="%g' $cfgfile > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") + test 0 -eq $? || _lt_function_replace_fail=: +else + # Save a `func_append' function call even when '+=' is not available + sed -e 's%func_append \([a-zA-Z_]\{1,\}\) "%\1="$\1%g' $cfgfile > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") + test 0 -eq $? || _lt_function_replace_fail=: +fi + +if test x"$_lt_function_replace_fail" = x":"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Unable to substitute extended shell functions in $ofile" >&5 +$as_echo "$as_me: WARNING: Unable to substitute extended shell functions in $ofile" >&2;} +fi + + + mv -f "$cfgfile" "$ofile" || + (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") + chmod +x "$ofile" + + + cat <<_LT_EOF >> "$ofile" + +# ### BEGIN LIBTOOL TAG CONFIG: CXX + +# The linker used to build libraries. 
+LD=$lt_LD_CXX + +# How to create reloadable object files. +reload_flag=$lt_reload_flag_CXX +reload_cmds=$lt_reload_cmds_CXX + +# Commands used to build an old-style archive. +old_archive_cmds=$lt_old_archive_cmds_CXX + +# A language specific compiler. +CC=$lt_compiler_CXX + +# Is the compiler the GNU compiler? +with_gcc=$GCC_CXX + +# Compiler flag to turn off builtin functions. +no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag_CXX + +# Additional compiler flags for building library objects. +pic_flag=$lt_lt_prog_compiler_pic_CXX + +# How to pass a linker flag through the compiler. +wl=$lt_lt_prog_compiler_wl_CXX + +# Compiler flag to prevent dynamic linking. +link_static_flag=$lt_lt_prog_compiler_static_CXX + +# Does compiler simultaneously support -c and -o options? +compiler_c_o=$lt_lt_cv_prog_compiler_c_o_CXX + +# Whether or not to add -lc for building shared libraries. +build_libtool_need_lc=$archive_cmds_need_lc_CXX + +# Whether or not to disallow shared libs when runtime libs are static. +allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes_CXX + +# Compiler flag to allow reflexive dlopens. +export_dynamic_flag_spec=$lt_export_dynamic_flag_spec_CXX + +# Compiler flag to generate shared objects directly from archives. +whole_archive_flag_spec=$lt_whole_archive_flag_spec_CXX + +# Whether the compiler copes with passing no objects directly. +compiler_needs_object=$lt_compiler_needs_object_CXX + +# Create an old-style archive from a shared archive. +old_archive_from_new_cmds=$lt_old_archive_from_new_cmds_CXX + +# Create a temporary old-style archive to link instead of a shared archive. +old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds_CXX + +# Commands used to build a shared archive. +archive_cmds=$lt_archive_cmds_CXX +archive_expsym_cmds=$lt_archive_expsym_cmds_CXX + +# Commands used to build a loadable module if different from building +# a shared archive. 
+module_cmds=$lt_module_cmds_CXX +module_expsym_cmds=$lt_module_expsym_cmds_CXX + +# Whether we are building with GNU ld or not. +with_gnu_ld=$lt_with_gnu_ld_CXX + +# Flag that allows shared libraries with undefined symbols to be built. +allow_undefined_flag=$lt_allow_undefined_flag_CXX + +# Flag that enforces no undefined symbols. +no_undefined_flag=$lt_no_undefined_flag_CXX + +# Flag to hardcode \$libdir into a binary during linking. +# This must work even if \$libdir does not exist +hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec_CXX + +# Whether we need a single "-rpath" flag with a separated argument. +hardcode_libdir_separator=$lt_hardcode_libdir_separator_CXX + +# Set to "yes" if using DIR/libNAME\${shared_ext} during linking hardcodes +# DIR into the resulting binary. +hardcode_direct=$hardcode_direct_CXX + +# Set to "yes" if using DIR/libNAME\${shared_ext} during linking hardcodes +# DIR into the resulting binary and the resulting library dependency is +# "absolute",i.e impossible to change by setting \${shlibpath_var} if the +# library is relocated. +hardcode_direct_absolute=$hardcode_direct_absolute_CXX + +# Set to "yes" if using the -LDIR flag during linking hardcodes DIR +# into the resulting binary. +hardcode_minus_L=$hardcode_minus_L_CXX + +# Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR +# into the resulting binary. +hardcode_shlibpath_var=$hardcode_shlibpath_var_CXX + +# Set to "yes" if building a shared library automatically hardcodes DIR +# into the library and all subsequent libraries and executables linked +# against it. +hardcode_automatic=$hardcode_automatic_CXX + +# Set to yes if linker adds runtime paths of dependent libraries +# to runtime path list. +inherit_rpath=$inherit_rpath_CXX + +# Whether libtool must link a program against all its dependency libraries. +link_all_deplibs=$link_all_deplibs_CXX + +# Set to "yes" if exported symbols are required. 
+always_export_symbols=$always_export_symbols_CXX + +# The commands to list exported symbols. +export_symbols_cmds=$lt_export_symbols_cmds_CXX + +# Symbols that should not be listed in the preloaded symbols. +exclude_expsyms=$lt_exclude_expsyms_CXX + +# Symbols that must always be exported. +include_expsyms=$lt_include_expsyms_CXX + +# Commands necessary for linking programs (against libraries) with templates. +prelink_cmds=$lt_prelink_cmds_CXX + +# Commands necessary for finishing linking programs. +postlink_cmds=$lt_postlink_cmds_CXX + +# Specify filename containing input files. +file_list_spec=$lt_file_list_spec_CXX + +# How to hardcode a shared library path into an executable. +hardcode_action=$hardcode_action_CXX + +# The directories searched by this compiler when creating a shared library. +compiler_lib_search_dirs=$lt_compiler_lib_search_dirs_CXX + +# Dependencies to place before and after the objects being linked to +# create a shared library. +predep_objects=$lt_predep_objects_CXX +postdep_objects=$lt_postdep_objects_CXX +predeps=$lt_predeps_CXX +postdeps=$lt_postdeps_CXX + +# The library search path used internally by the compiler when linking +# a shared library. +compiler_lib_search_path=$lt_compiler_lib_search_path_CXX + +# ### END LIBTOOL TAG CONFIG: CXX +_LT_EOF + + ;; + + esac +done # for ac_tag + + +as_fn_exit 0 +_ACEOF +ac_clean_files=$ac_clean_files_save + +test $ac_write_fail = 0 || + as_fn_error $? "write failure creating $CONFIG_STATUS" "$LINENO" 5 + + +# configure is writing to config.log, and then calls config.status. +# config.status does its own redirection, appending to config.log. +# Unfortunately, on DOS this fails, as config.log is still kept open +# by configure, so config.status won't be able to write to it; its +# output is simply discarded. So we exec the FD to /dev/null, +# effectively closing config.log, so it can be properly (re)opened and +# appended to by config.status. 
When coming back to configure, we +# need to make the FD available again. +if test "$no_create" != yes; then + ac_cs_success=: + ac_config_status_args= + test "$silent" = yes && + ac_config_status_args="$ac_config_status_args --quiet" + exec 5>/dev/null + $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false + exec 5>>config.log + # Use ||, not &&, to avoid exiting from the if with $? = 1, which + # would make configure fail if this is the last instruction. + $ac_cs_success || as_fn_exit 1 +fi +if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5 +$as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;} +fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: Build option summary: + CC: $CC + CXX: $CXX + CPP: $CPP + CFLAGS: $CFLAGS + SHARED_CFLAGS: $SHARED_CFLAGS + CXXFLAGS: $CXXFLAGS + SHARED_CXXFLAGS: $SHARED_CXXFLAGS + SHARED_CXXLINKFLAGS:$SHARED_LINKCXXFLAGS + CPPFLAGS: $CPPFLAGS + LDFLAGS: $LDFLAGS + SHARED_LDFLAGS: $SHARED_LDFLAGS + EXTRA_CC_LDFLAGS: $EXTRA_CC_LDFLAGS + EXTRA_CXX_LDFLAGS: $EXTRA_CXX_LDFLAGS + LIBTOOL_LINK_FLAGS: $LIBTOOL_LINK_FLAGS + MGMT_DEFS: $MGMT_DEFS + API_DEFS: $API_DEFS +" >&5 +$as_echo "$as_me: Build option summary: + CC: $CC + CXX: $CXX + CPP: $CPP + CFLAGS: $CFLAGS + SHARED_CFLAGS: $SHARED_CFLAGS + CXXFLAGS: $CXXFLAGS + SHARED_CXXFLAGS: $SHARED_CXXFLAGS + SHARED_CXXLINKFLAGS:$SHARED_LINKCXXFLAGS + CPPFLAGS: $CPPFLAGS + LDFLAGS: $LDFLAGS + SHARED_LDFLAGS: $SHARED_LDFLAGS + EXTRA_CC_LDFLAGS: $EXTRA_CC_LDFLAGS + EXTRA_CXX_LDFLAGS: $EXTRA_CXX_LDFLAGS + LIBTOOL_LINK_FLAGS: $LIBTOOL_LINK_FLAGS + MGMT_DEFS: $MGMT_DEFS + API_DEFS: $API_DEFS +" >&6;} diff --git a/configure.ac b/configure.ac new file mode 100644 index 00000000..aff6549a --- /dev/null +++ b/configure.ac @@ -0,0 +1,2000 @@ +# Process this file with `autoreconf -i` to create a 'configure' file. 
+ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Table of Contents +# 1. INITIALIZATION +# 2. SITE CONFIGURATION +# 3. CHECK FOR PROGRAMS +# 4. CHECK FOR LIBRARIES +# 5. CHECK FOR HEADERS +# 6. OUTPUT FILES +# 7. autoheader TEMPLATES + +# ----------------------------------------------------------------------------- +# 1. INITIALIZATION + +# These first two version numbers are updated automatically on each release. +# Version number is calculated as MAJOR * 1000000 + MINOR * 1000 + MICRO +# Version string is in the form of MAJOR.MINOR.MICRO[sufix] +# +m4_define([TS_VERSION_S],[5.0.1]) +m4_define([TS_VERSION_N],[5000001]) + +AC_INIT([Apache Traffic Server], TS_VERSION_S(), [dev@trafficserver.apache.org], [trafficserver],[http://trafficserver.apache.org]) +AC_PREREQ([2.59]) +AC_CONFIG_AUX_DIR([build/aux]) +AC_CONFIG_SRCDIR([proxy/Main.cc]) +AC_CONFIG_MACRO_DIR([build]) +AM_INIT_AUTOMAKE([-Wall -Werror tar-ustar foreign no-installinfo no-installman subdir-objects 1.9.2]) +AC_CONFIG_HEADERS([lib/ts/ink_autoconf.h]) + +# Configure with --disable-silent-rules to get verbose output. 
For more info, see +# http://www.gnu.org/software/automake/manual/html_node/Automake-silent_002drules-Option.html +m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])]) + +AC_DISABLE_STATIC + +# Libtool versioning uses different conventions on different +# platforms. At least on FreeBSD, libtool uses an overly complex +# convention that attempts to solve problems that most people just +# don't have and which just causes confusion for most end users. +# +TS_VERSION_MAJOR=$((TS_VERSION_N() / 1000000 )) +TS_VERSION_MINOR=$(((TS_VERSION_N() / 1000) % 1000 )) +TS_VERSION_MICRO=$((TS_VERSION_N() % 1000 )) +TS_LIBTOOL_MAJOR=`echo $((${TS_VERSION_MAJOR} + ${TS_VERSION_MINOR}))` +TS_LIBTOOL_VERSION=$TS_LIBTOOL_MAJOR:$TS_VERSION_MICRO:$TS_VERSION_MINOR +TS_VERSION_STRING=TS_VERSION_S() +TS_VERSION_NUMBER=TS_VERSION_N() + +# +# Substitute the above version numbers into the various files below. +# +AC_SUBST(TS_LIBTOOL_VERSION) +AC_SUBST(TS_VERSION_STRING) +AC_SUBST(TS_VERSION_NUMBER) +AC_SUBST(TS_VERSION_MAJOR) +AC_SUBST(TS_VERSION_MINOR) +AC_SUBST(TS_VERSION_MICRO) + +dnl Hard-coded top of ink_autoconf.h: +AH_TOP([ +#ifndef _ink_autoconf_h +#define _ink_autoconf_h +]) + +dnl Hard-coded inclusion at the tail end of ink_autoconf.h: +AH_BOTTOM([ + +#endif /* _ink_autoconf_h */ +]) + +# +# Generate ./config.nice for reproducing runs of configure +# +TS_CONFIG_NICE([config.nice]) + +# XXX we can't just use AC_PREFIX_DEFAULT because that isn't subbed in +# by configure until it is too late. Is that how it should be or not? +# Something seems broken here. +AC_PREFIX_DEFAULT([/usr/local/trafficserver]) + +# Get the layout here, so we can pass the required variables to Trafficserver +TS_ENABLE_LAYOUT(TrafficServer, [cachedir docdir]) + +# Reparse the configure arguments so we can override the layout. 
+TS_PARSE_ARGUMENTS + +# +# Host detection +# +AC_CANONICAL_HOST +HOST_GUESS="$host" +AC_SUBST(HOST_GUESS) + +# +# Build environment +# +build_person="`id -nu`" +build_group="`id -ng`" +build_machine="`uname -n`" +AC_SUBST([build_machine]) +AC_SUBST([build_person]) +AC_SUBST([build_group]) + +AC_ARG_WITH([user], + [AS_HELP_STRING([--with-user],[specify the system user [default=nobody]])], + [ + with_user="$withval" + ],[ + with_user="nobody" + ] +) + +default_group="`id -ng $with_user`" +AC_ARG_WITH([group], + [AS_HELP_STRING([--with-group],[specify the system group [default=nobody]])], + [ + with_group="$withval" + ],[ + with_group=${default_group:-nobody} + ] +) +AC_SUBST([pkgsysuser],[$with_user]) +AC_SUBST([pkgsysgroup],[$with_group]) + +# ----------------------------------------------------------------------------- +# 2. SITE CONFIGURATION + +# +# Debug +# + +AC_MSG_CHECKING([whether to enable debugging]) +AC_ARG_ENABLE([debug], + [AS_HELP_STRING([--enable-debug],[turn on debugging])], + [], + [enable_debug=no] +) +AC_MSG_RESULT([$enable_debug]) + +# +# Enable -Werror. We want this enabled by default for developers, but disabled by default +# for end users (because we don't want released versions to suffer from compiler warning hell). +# +AC_MSG_CHECKING([whether to enable -Werror]) +AC_ARG_ENABLE([werror], + [AS_HELP_STRING([--enable-werror],[turn compiler warnings into errors @<:@default=auto@:>@])], + [], + [ + case $TS_VERSION_STRING in + *dev) enable_werror=yes ;; + *) enable_werror=no ;; + esac + ] +) +AC_MSG_RESULT([$enable_werror]) + + +# +# Fast SDK APIs, this disables the parameter checks (assert) +# on all APIs. 
+# + +AC_MSG_CHECKING([whether to enable fast SDK APIs]) +AC_ARG_ENABLE([fast-sdk], + [AS_HELP_STRING([--enable-fast-sdk],[enable fast SDK APIs (no input parameter sanity checks)])], + [], + [enable_fast_sdk=no] +) +AC_MSG_RESULT([$enable_fast_sdk]) +TS_ARG_ENABLE_VAR([use], [fast-sdk]) +AC_SUBST(use_fast_sdk) + +# +# Diags +# + +AC_MSG_CHECKING([whether to enable diags]) +AC_ARG_ENABLE([diags], + [AS_HELP_STRING([--disable-diags],[turn off diags])], + [], + [enable_diags=yes] +) +AC_MSG_RESULT([$enable_diags]) +TS_ARG_ENABLE_VAR([use], [diags]) +AC_SUBST(use_diags) + +# +# Build regression tests? +# + +AC_MSG_CHECKING([whether to enable regression tests]) +AC_ARG_ENABLE([tests], + [AS_HELP_STRING([--disable-tests],[turn off regression tests])], + [], + [enable_tests=yes] +) +AC_MSG_RESULT([$enable_tests]) +TS_ARG_ENABLE_VAR([has], [tests]) +AC_SUBST(has_tests) +AM_CONDITIONAL([BUILD_TESTS], [test 0 -ne $has_tests]) + +AC_SUBST([ink_with_modules_local],["-DLOCAL_MANAGER"]) +AC_SUBST([ink_with_modules_process],["-DPROCESS_MANAGER"]) + +# +# Force some static linkage (for testing / development only) +# +AC_MSG_CHECKING([whether to build some static libts (dev only)]) +AC_ARG_ENABLE([static-libts], + [AS_HELP_STRING([--enable-static-libts],[build some static libts (dev only)])], + [], + [enable_static_libts=no] +) +AC_MSG_RESULT([$enable_static_libts]) +TS_ARG_ENABLE_VAR([has],[static-libts]) +AM_CONDITIONAL([STATIC_LIBTS], [test 0 -ne $has_static_libts]) + +# +# Remote Coverity Prevent commit +# +AC_MSG_CHECKING([whether to commit cov defects to remote host]) +AC_ARG_ENABLE([remote-cov-commit], + [AS_HELP_STRING([--enable-remote-cov-commit[=HOST]], [commit cov defects to remote host [HOST=localhost]])], + [], + [enable_remote_cov_commit=localhost] +) +AC_MSG_RESULT([$enable_remote_cov_commit]) +AC_SUBST([enable_remote_cov_commit]) + +# +# WCCP +# +AC_MSG_CHECKING([whether to enable WCCP v2 support]) +AC_ARG_ENABLE([wccp], + [AS_HELP_STRING([--enable-wccp],[enable 
WCCP v2])], + [], + [enable_wccp=no] +) +AC_MSG_RESULT([$enable_wccp]) +TS_ARG_ENABLE_VAR([has],[wccp]) +AC_SUBST(has_wccp) +AM_CONDITIONAL([BUILD_WCCP], [test 0 -ne $has_wccp]) + +# +# InterimCache +# +AC_MSG_CHECKING([whether to enable interim cache support]) +AC_ARG_ENABLE([interim_cache], + [AS_HELP_STRING([--enable-interim-cache],[enable interim cache])], + [], + [enable_interim_cache=no] +) +AC_MSG_RESULT([$enable_interim_cache]) +TS_ARG_ENABLE_VAR([has],[interim_cache]) +AC_SUBST(has_interim_cache) +AM_CONDITIONAL([BUILD_INTERIM_CACHE], [test 0 -ne $has_interim_cache]) + +# Google profiler +AC_MSG_CHECKING([whether to enable profiler]) +AC_ARG_WITH([profiler], + [AS_HELP_STRING([--with-profiler],[enable support for profiler [default=no]])], + [with_profiler=$withval], + [with_profiler=no] +) +AC_MSG_RESULT([$with_profiler]) + +# +# use eventfd() or pipes +# Found that ec2 is using an older kernel causing eventfd errors. +# Disable eventfd when using ATS on EC2 Fedora. +# +AC_MSG_CHECKING([whether to enable eventfd()]) +AC_ARG_ENABLE([eventfd], + [AS_HELP_STRING([--disable-eventfd],[turn off eventfd and use pipes])], + [], + [enable_eventfd="yes"] +) +AC_MSG_RESULT([$enable_eventfd]) + +# +# use POSIX capabilities instead of user ID switching. 
+# +AC_MSG_CHECKING([whether to use POSIX capabilities]) +AC_ARG_ENABLE([posix-cap], + [AS_HELP_STRING([--disable-posix-cap],[Use user id switching instead of POSIX capabilities])], + [], + [enable_posix_cap="auto"] +) +AC_MSG_RESULT([$enable_posix_cap]) + +# +# use hwloc library when possible (can be disabled) +# +AC_MSG_CHECKING([whether to use hwloc library]) +AC_ARG_ENABLE([hwloc], + [AS_HELP_STRING([--disable-hwloc],[Don't use the hwloc library])], + [], + [enable_hwloc="yes"] +) +AC_MSG_RESULT([$enable_hwloc]) + +# +# Enble ccache explicitly (it's disabled by default, because of build problems in some cases) +# +AC_MSG_CHECKING([whether to enable ccache]) +AC_ARG_ENABLE([ccache], + [AS_HELP_STRING([--enable-ccache],[Enable ccache (for developers)])], + [], + [enable_ccache="no"] +) +AC_MSG_RESULT([$enable_ccache]) + +# +# Use TPROXY for connection transparency. +# +AC_MSG_CHECKING([whether to enable TPROXY based transparency]) +AC_ARG_ENABLE([tproxy], + [AS_HELP_STRING([--enable-tproxy[[=ARG]]], + [Use TPROXY to enable connection transparency. + 'auto' or omitted for local system default, + 'no' to disable, + 'force' to use built in default, + number to use as IP_TRANSPARENT sockopt. + [default=auto] + ]) + ], + [], + [enable_tproxy="auto"] +) +AC_MSG_RESULT([$enable_tproxy]) + +# +# Disable our freelist implementation, reverting it to whatever +# allocator (malloc, tcmalloc or jemalloc) that is in use. This is +# useful for debugging. +# +AC_MSG_CHECKING([whether to disable freelist]) +AC_ARG_ENABLE([freelist], + [AS_HELP_STRING([--disable-freelist],[turn off freelist and use allocators])], + [], + [enable_freelist="yes"] +) +AC_MSG_RESULT([$enable_freelist]) +TS_ARG_ENABLE_VAR([use], [freelist]) +AC_SUBST(use_freelist) + +# +# The original InkFreeList memory pool could not reclaim itself +# automatically. We can use '--enable-reclaimable-freelist' option +# to enable the reclaiming feature. This option is effective only +# when freelist is enable. 
+# +if test "x${enable_freelist}" = "xyes"; then + AC_MSG_CHECKING([whether to enable reclaimable freelist]) + AC_ARG_ENABLE([reclaimable-freelist], + [AS_HELP_STRING([--enable-reclaimable-freelist], + [enable reclaimable freelist support (effective only when freelist is enabled)])], + [], + [enable_reclaimable_freelist="no"]) + AC_MSG_RESULT([$enable_reclaimable_freelist]) +else + enable_reclaimable_freelist="no" +fi +TS_ARG_ENABLE_VAR([use], [reclaimable_freelist]) +AC_SUBST(use_reclaimable_freelist) + +# +# Options for SPDY +# +AC_MSG_CHECKING([whether to enable spdy]) +AC_ARG_ENABLE([spdy], + [AS_HELP_STRING([--enable-spdy], [turn on spdy protocol])], + [], + [enable_spdy="no"]) +AC_MSG_RESULT([$enable_spdy]) +TS_ARG_ENABLE_VAR([has], [spdy]) +AC_SUBST(has_spdy) +AM_CONDITIONAL([BUILD_SPDY], [test 0 -ne $has_spdy]) + +# +# Configure how many stats to allocate for plugins. Default is 512. +# +AC_ARG_WITH([max-api-stats], + [AS_HELP_STRING([--with-max-api-stats],[max number of plugin stats [default=512]])], + [max_api_stats=$withval], + [max_api_stats=512] +) +AC_SUBST(max_api_stats) + +# +# Max host name length that we deal with in URLs. 
+# +AC_ARG_WITH([max-host-name-len], + [AS_HELP_STRING([--with-max-host-name-len],[max host name length [default=256]])], + [max_host_name_len=$withval], + [max_host_name_len=256] +) +AC_SUBST(max_host_name_len) + +# +# EventProcessor thread configurations +# + +AC_ARG_WITH([max-event-threads], + [AS_HELP_STRING([--with-max-event-threads],[max number of event threads [default=4096]])], + [max_event_threads=$withval], + [max_event_threads=4096] +) +AC_SUBST(max_event_threads) + +AC_ARG_WITH([max-threads-per-type], + [AS_HELP_STRING([--with-max-threads-per-type],[max number of threads per event type [default=3072]])], + [max_threads_per_type=$withval], + [max_threads_per_type=3072] +) +AC_SUBST(max_threads_per_type) + +# +# Experimental plugins +# + +AC_MSG_CHECKING([whether to enable experimental plugins]) +AC_ARG_ENABLE([experimental-plugins], + [AS_HELP_STRING([--enable-experimental-plugins],[build experimental plugins])], + [], + [enable_experimental_plugins=no] +) +AC_MSG_RESULT([$enable_experimental_plugins]) +AM_CONDITIONAL([BUILD_EXPERIMENTAL_PLUGINS], [ test "x${enable_experimental_plugins}" = "xyes" ]) + +# +# Example plugins. The example plugins are always built, but not always installed. Installing +# them is useful for QA, but not useful for most users, so we default this to disabled. +# + +AC_MSG_CHECKING([whether to install example plugins]) +AC_ARG_ENABLE([example-plugins], + [AS_HELP_STRING([--enable-example-plugins],[install example plugins])], + [], + [enable_example_plugins=no] +) +AC_MSG_RESULT([$enable_example_plugins]) +AM_CONDITIONAL([BUILD_EXAMPLE_PLUGINS], [ test "x${enable_example_plugins}" = "xyes" ]) + + +# +# Test tools. The test tools are always built, but not always installed. Installing +# them is useful for QA, but not useful for most users, so we default this to disabled. 
+# + +AC_MSG_CHECKING([whether to install testing tools]) +AC_ARG_ENABLE([test-tools], + [AS_HELP_STRING([--enable-test-tools],[install testing tools])], + [], + [enable_test_tools=no] +) +AC_MSG_RESULT([$enable_test_tools]) +AM_CONDITIONAL([BUILD_TEST_TOOLS], [ test "x${enable_test_tools}" = "xyes" ]) + +# +# build c++ api +# +AC_MSG_CHECKING([whether to build c++ api]) +AC_ARG_ENABLE([cppapi], + [AS_HELP_STRING([--enable-cppapi],[Build the c++ api])], + [], + [enable_cppapi="no"] +) +AC_MSG_RESULT([$enable_cppapi]) +AM_CONDITIONAL([ENABLE_CPPAPI], [ test "x${enable_cppapi}" = "xyes" ]) + +# +# Disable LuaJIT? This is a interrim option, when Lua becomes part of the core +# requirements, we will remove this option. +# +AC_MSG_CHECKING([whether to disable LuaJIT]) +AC_ARG_ENABLE([luajit], + [AS_HELP_STRING([--disable-luajit],[turn off LuaJIT])], + [], + [enable_luajit="yes"] +) +AC_MSG_RESULT([$enable_luajit]) +AM_CONDITIONAL([BUILD_LUAJIT], [ test "x${enable_luajit}" = "xyes" ]) + +# +# Installation directories +# For each var the following is evaluated +# foo Standard variable eg. ${prefix}/foo +# rel_foo Relative to prefix eg. 
foo +# +TS_SUBST_LAYOUT_PATH([prefix]) +TS_SUBST_LAYOUT_PATH([exec_prefix]) +TS_SUBST_LAYOUT_PATH([bindir]) +TS_SUBST_LAYOUT_PATH([sbindir]) +TS_SUBST_LAYOUT_PATH([libdir]) +TS_SUBST_LAYOUT_PATH([libexecdir]) +TS_SUBST_LAYOUT_PATH([infodir]) +TS_SUBST_LAYOUT_PATH([mandir]) +TS_SUBST_LAYOUT_PATH([sysconfdir]) +TS_SUBST_LAYOUT_PATH([datadir]) +TS_SUBST_LAYOUT_PATH([installbuilddir]) +TS_SUBST_LAYOUT_PATH([includedir]) +TS_SUBST_LAYOUT_PATH([localstatedir]) +TS_SUBST_LAYOUT_PATH([runtimedir]) +TS_SUBST_LAYOUT_PATH([logdir]) +TS_SUBST_LAYOUT_PATH([cachedir]) +TS_SUBST_LAYOUT_PATH([docdir]) + +TS_SUBST([pkgbindir]) +TS_SUBST([pkgsbindir]) +TS_SUBST([pkglibdir]) +TS_SUBST([pkglibexecdir]) +TS_SUBST([pkgsysconfdir]) +TS_SUBST([pkgdatadir]) +TS_SUBST([pkglocalstatedir]) +TS_SUBST([pkgruntimedir]) +TS_SUBST([pkglogdir]) +TS_SUBST([pkgcachedir]) +TS_SUBST([pkgdocdir]) + + +# ----------------------------------------------------------------------------- +# 3. CHECK FOR PROGRAMS + +# Compiler selection: +# +# Implementation note (toc) +# 1) Get default compiler settings (case statement.) +# 2) Check for over-rides of default compiler. +# 3) Set standard CFLAGS, SHARED_CFLAGS, etc. +# 4) (in first kludge mode block...) obtain any further CFLAG-type additions. +# 5) Test compilers with all flags set. + +# AC_PROG can sometimes mangle CFLAGS etc. +# in particular, on Linux they insert -g -O2, here we preserve any user CFLAGS +_ts_saved_CFLAGS="${CFLAGS}" +_ts_saved_CXXFLAGS="${CXXFLAGS}" + +# We force the compiler search list because the default GCC on Darwin cannot build +# Traffic Server. On most (all?) platforms, cc and c++ should be the preferred default +# compiler. +AC_PROG_CC([cc gcc clang icc]) +AC_PROG_CXX([c++ g++ clang++ icpc]) + +AM_PROG_CC_C_O +AC_PROG_CPP +AC_PROG_CXXCPP +AM_PROG_AS + +AX_COMPILER_VENDOR + +CFLAGS="${_ts_saved_CFLAGS}" +CXXFLAGS="${_ts_saved_CXXFLAGS}" + +# AX_CXX_COMPILE_STDCXX_11 requires the current language to be C++. 
+AC_LANG_PUSH([C++])
+
+AX_CXX_COMPILE_STDCXX_11( [noext], [enable_cxx_11_support=yes], [
+  enable_cxx_11_support=no
+  AC_MSG_NOTICE([disabling features that depend on C++11 support])
+])
+
+AM_CONDITIONAL([BUILD_HAVE_CXX_11], [ test "x${enable_cxx_11_support}" = "xyes" ])
+if test "x${enable_cxx_11_support}" = "xyes" ; then
+  AC_DEFINE(HAVE_CXX_11, 1, [Whether the compiler supports C++11])
+fi
+
+# While we are in C++ mode, check for the GNU hash_map extension.
+TS_FLAG_HEADERS([unordered_map unordered_set])
+# NOTE(review): the angle-bracketed header names and template arguments in the
+# two compile tests below had been stripped from this file (HTML-style
+# mangling). They are restored to the conventional form of these checks --
+# confirm against the upstream configure.ac. Without template arguments the
+# hash_map test could never compile, so the check always reported "no".
+AC_MSG_CHECKING([for __gnu_cxx::hash_map])
+AC_COMPILE_IFELSE([
+  AC_LANG_PROGRAM([
+    #include <ext/hash_map>
+    #include <ext/hash_set>
+  ], [
+    __gnu_cxx::hash_map<int, int> map;
+  ])],
+  [ have_gnu_cxx_hash_map=yes ],
+  [ have_gnu_cxx_hash_map=no ]
+)
+
+AC_MSG_RESULT([$have_gnu_cxx_hash_map])
+if test "x${have_gnu_cxx_hash_map}" = "xyes" ; then
+  AC_DEFINE(HAVE_GNU_CXX_HASH_MAP, 1, [whether __gnu_cxx::hash_map is available])
+fi
+
+# And check for std::shared_ptr vs std::tr1::shared_ptr
+TS_FLAG_HEADERS([unordered_map unordered_set])
+AC_MSG_CHECKING([for std::shared_ptr])
+AC_COMPILE_IFELSE([
+  AC_LANG_PROGRAM([
+    #include <memory>
+    using std::shared_ptr;
+  ], [
+  ])],
+  [ have_std_shared_ptr=yes ],
+  [ have_std_shared_ptr=no ]
+)
+
+AC_MSG_RESULT([$have_std_shared_ptr])
+if test "x${have_std_shared_ptr}" = "xyes" ; then
+  AC_DEFINE(HAVE_STD_SHARED_PTR, 1, [whether std::shared_ptr is available])
+fi
+
+AC_LANG_POP()
+
+dnl AC_PROG_SED is only available from version 2.6 (released in 2003). CentOS
+dnl 5.9 still has an ancient version, but we have macros that require
+dnl AC_PROG_SED. The actual AC_PROG_SED macro does functional checks, but here
+dnl we define a trivial local version for times when we are running on
+dnl obsolete autoconf.
+ifdef([AC_PROG_SED], [], [
+  AC_DEFUN([AC_PROG_SED], [
+    AC_CHECK_PROG(SED, sed, sed)
+  ])
+])
+
+
+# Various OS specific setup. Note that on Solaris, 32-bit is always the
+# default, even on a box with a 64-bit architecture.
+# This also sets up a "normalized" variable and define $host_os_def. +defer_accept=1 +case $host_os in + linux*) + host_os_def="linux" + defer_accept=45 + EXTRA_CXX_LDFLAGS="-rdynamic" + ;; + darwin*) + host_os_def="darwin" + ;; + freebsd*) + host_os_def="freebsd" + EXTRA_CXX_LDFLAGS="-rdynamic" + TS_ADDTO(CPPFLAGS, [-I/usr/local/include]) + ;; + kfreebsd*) + host_os_def="freebsd" + EXTRA_CXX_LDFLAGS="-rdynamic" + TS_ADDTO(CPPFLAGS, [-I/usr/local/include]) + TS_ADDTO(CPPFLAGS, [-Dkfreebsd]) + ;; + openbsd*) + host_os_def="openbsd" + ;; + solaris*) + host_os_def="solaris" + case "`isalist`" in + *amd64*) + TS_ADDTO(CFLAGS, -m64) + TS_ADDTO(CXXFLAGS, -m64) + TS_ADDTO(LUA_LDFLAGS, -m64) + ;; + esac + ;; + *) + EXTRA_CXX_LDFLAGS="-rdynamic" + host_os_def=unknown + ;; +esac + +TS_ADDTO(CPPFLAGS, [-D$host_os_def]) +AC_SUBST(defer_accept) + + +dnl AM_PROG_AR is not always available, but it doesn't seem to be needed in older versions. +ifdef([AM_PROG_AR], + [AM_PROG_AR]) + +AC_PROG_AWK +AC_PROG_SED +AC_PROG_LN_S +AC_PROG_INSTALL +AC_PROG_LIBTOOL +AC_CHECK_PROG(RM, rm, rm) +AC_CHECK_PROG(ASCPP, cpp, cpp) +AC_CHECK_TOOL(AR, ar, ar) +AC_ISC_POSIX + +AM_PATH_PYTHON([2.4], [ + dnl action-if-found + TS_MAN1_MANPAGES=`cd $srcdir/doc && $PYTHON manpages.py --section=1 | $AWK '{print "$(BUILDDIR)/man/" $0 }' | tr '\n' ' '` + TS_MAN3_MANPAGES=`cd $srcdir/doc && $PYTHON manpages.py --section=3 | $AWK '{print "$(BUILDDIR)/man/" $0 }' | tr '\n' ' '` + TS_MAN5_MANPAGES=`cd $srcdir/doc && $PYTHON manpages.py --section=5 | $AWK '{print "$(BUILDDIR)/man/" $0 }' | tr '\n' ' '` + TS_MAN8_MANPAGES=`cd $srcdir/doc && $PYTHON manpages.py --section=8 | $AWK '{print "$(BUILDDIR)/man/" $0 }' | tr '\n' ' '` + + # If we have python, check if the Sphinx version looks OK. 
+ AS_IF(["$PYTHON" "$srcdir/doc/checkvers.py" --check-version], [ + sphinx_version_check=yes + ]) + +], [ + dnl action-if-not-found + : +]) + +AC_ARG_VAR(SPHINXBUILD, [the sphinx-build documentation generator]) +AC_ARG_VAR(SPHINXOPTS, [additional sphinx-build options]) +AC_CHECK_PROG([SPHINXBUILD], [sphinx-build], [sphinx-build], [false]) + +AC_SUBST(TS_MAN1_MANPAGES) +AC_SUBST(TS_MAN3_MANPAGES) +AC_SUBST(TS_MAN5_MANPAGES) +AC_SUBST(TS_MAN8_MANPAGES) + +AC_MSG_CHECKING([whether to build man pages]) +AS_IF([test "x$sphinx_version_check" = "xyes" -a "x$SPHINXBUILD" != "xfalse"], [ + AM_CONDITIONAL([BUILD_MANPAGES], [true]) + AC_MSG_RESULT([yes]) +], [ + AM_CONDITIONAL([BUILD_MANPAGES], [false]) + AC_MSG_RESULT([no]) +]) + +# Do bison check by hand because we must do a version check. +# Use YACC because it makes autotools shut up. +BISON_MAJOR=2 +BISON_MINOR=4 +BISON_POINT=1 +AC_CHECK_PROG([YACC],[bison],[bison]) +AS_IF([test -n "$YACC"], + [ bison_version_check=`$YACC --version 2>&1 | \ + $SED -n '/bison/s/^[[^0-9]]*\([[0-9]][[0-9.]]*\).*$/\1/p' | \ + $AWK "{ if (\\$1 > $BISON_MAJOR || (\\$1 == $BISON_MAJOR && (\\$2 > $BISON_MINOR || (\\$2 == $BISON_MINOR && (NR == 2 || \\$3 >= $BISON_POINT))))) print \"yes\"; else printf(\"version %d.%d.%d\",\\$1,\\$2,\\$3); }" FS=. \ + ` + AS_IF([test "x$bison_version_check" != "xyes"], + [ YACC='' + AS_IF([test -z "$bison_version_check"], + [bison_version_check='no version data'] + ) + ] + ) + ], + [ YACC='' + bison_version_check="nothing" + ] +) +# Check lex/flex by hand because we need flex of a sufficient version. +FLEX_MAJOR=2 +FLEX_MINOR=5 +FLEX_POINT=33 +dnl ylwrap requires the lexer executable to be an absolute path or in the srcdir. +dnl but we need various other LEX values. 
+AC_PROG_LEX +AS_IF([test -n "$LEX"], + [ flex_version_check=`$LEX --version 2>&1 | \ + $SED -n '/flex/s/^[[^0-9]]*\([[0-9]][[0-9.]]*\)[[^0-9]]*.*$/\1/p' | \ + $AWK "{ if (\\$1 > $FLEX_MAJOR || (\\$1 == $FLEX_MAJOR && (\\$2 > $FLEX_MINOR || (\\$2 == $FLEX_MINOR && (NR == 2 || \\$3 >= $FLEX_POINT))))) print \"yes\"; else printf(\"version %d.%d.%d\",\\$1,\\$2,\\$3); }" FS=. \ + ` + AS_IF([test "x$flex_version_check" != "xyes"], + [ LEX='' + AS_IF([test -z "$flex_version_check"], + [flex_version_check='no version data'] + ) + ] + ) + ], + [ LEX='' + flex_version_check="nothing" + ] +) + +AS_IF([test "x$enable_wccp" = "xyes"], + [ + AS_IF([test -z "$LEX" ], + AC_MSG_ERROR([Need flex version $FLEX_MAJOR.$FLEX_MINOR.$FLEX_POINT or better to enable WCCP (found $flex_version_check)]) + ) + + AS_IF([test -z "$YACC" ], + AC_MSG_ERROR([Need bison version $BISON_MAJOR.$BISON_MINOR.$BISON_POINT or better to enable WCCP (found $bison_version_check)]) + ) + + ] +) + +# Check for Perl and Doxygen +AC_PATH_PROG([DOXYGEN], [doxygen]) # needed for Doxygen +AC_PATH_PROG([PERL], [perl],[not found]) +AS_IF([test "x$PERL" = "xnot found"], + [AC_MSG_ERROR([check for perl failed. 
Have you installed perl?])] +) +AC_ARG_VAR([DOXYGEN], [full path of Doxygen executable]) +AC_ARG_VAR([PERL], [full path of Perl executable]) + +# Check if MakeMaker is available +AX_PROG_PERL_MODULES([ExtUtils::MakeMaker], AM_CONDITIONAL([BUILD_PERL_LIB], [true]), + AM_CONDITIONAL([BUILD_PERL_LIB], [false]) +) + +# Check for GNU-style -On optimization flags +AC_MSG_CHECKING([checking whether to auto-set compiler optimization flags]) +has_optimizer_flags=`echo "$CFLAGS $CXXFLAGS" | ${AWK} '$0 !~ /-O.?/{print "no"}'` +AS_IF([test "x${has_optimizer_flags}" = "xno"], + [ + optimizing_flags='-O3' + AC_MSG_RESULT([yes ${optimizing_flags}]) + ], + [ + has_optimizer_flags='yes' + optimizing_flags='' + AC_MSG_RESULT([no]) + ] +) + +AM_CONDITIONAL([BUILD_HAVE_LIBCXX], [ false ]) + +case $host_os_def in + linux) + AS_IF([test "x$ax_cv_c_compiler_vendor" = "xintel"], [ + # -Wall goes crazy, so turned these specific checks off for now: + # + # 111 is "statement is unreachable" + # 279 is "controlling expression is constant", triggered by our asserts + # 383 is "value copied to temporary, reference to temporary used" + # 444 is "destructor for base class is not virtual" + # 522 is "function "xyz" redeclared "inline" after being called + # 873 is "has no corresponding operator delete". ToDo: we should fix. + # 981 is "operands are evaluated in unspecified order" + # 1418 is "external function definition with no prior declaration" + # 1419 is "external declaration in primary source file" + # 1572 is "floating-point equality and inequality comparisons are unreliable" + # 1720 is "operator new" has no corresponding member operator delete" + # 2256 is "non-pointer conversion from "int" to "unsigned char" " + # 2259 is "non-pointer conversion from "int" to "unsigned char" " + # + # TODO: We should try to eliminate more of these -wd exclusions. 
+ common_opt="-pipe -Wall -wd111 -wd279 -wd383 -wd522 -wd444 -wd873 -wd981 -wd1418 -wd1419 -wd1572 -wd1720 -wd2256 -wd2259" + debug_opt="-ggdb3 $common_opt" + release_opt="-g $common_opt $optimization_flags -axsse4.2 -fno-strict-aliasing" + cxx_opt="-Wno-invalid-offsetof" + ]) + + AS_IF([test "x$ax_cv_c_compiler_vendor" = "xclang"], [ + debug_opt="-ggdb3 $common_opt -Qunused-arguments" + release_opt="-g $common_opt $optimizing_flags -fno-strict-aliasing -Qunused-arguments" + cxx_opt="-Wno-invalid-offsetof -Qunused-arguments" + ]) + + AS_IF([test "x$ax_cv_c_compiler_vendor" = "xgnu"], [ + # This is useful for finding odd conversions + # common_opt="-pipe -Wall -Wconversion -Wno-sign-conversion" + ts_am_common_flags="-Wunused-parameter" + common_opt="-pipe -Wall" + debug_opt="-ggdb3 $common_opt" + release_opt="-g $common_opt $optimizing_flags -feliminate-unused-debug-symbols -fno-strict-aliasing" + cxx_opt="-Wno-invalid-offsetof" + # Special options for flex generated .c files + flex_cflags="-Wno-unused-parameter" + ]) + + ;; # linux) + + darwin) + AS_IF([test "x$ax_cv_c_compiler_vendor" = "xclang"], [ + common_opt="-pipe -Wall -Wno-deprecated-declarations -Qunused-arguments" + debug_opt="-g $common_opt" + release_opt="-g $common_opt $optimizing_flags -fno-strict-aliasing" + cxx_opt="-Wno-invalid-offsetof" + ], [ + AC_MSG_WARN([clang is the only supported compiler on Darwin]) + ]) + + # Darwin needs to use libc++ for any C++11 code. + AM_CONDITIONAL([BUILD_HAVE_LIBCXX], [ true ]) + + # NOTE: This seems semi-kludgy, but useful for MacPorts I think. 
+ AS_IF([test -d /opt/local/include], [ + TS_ADDTO(CPPFLAGS, [-I/opt/local/include]) + ]) + AS_IF([test -d /opt/local/lib], [ + TS_ADDTO(LDFLAGS, [-L/opt/local/lib]) + ]) + + ;; # darwin) + + freebsd|kfreebsd) + AS_IF([test "x$ax_cv_c_compiler_vendor" = "xclang"], [ + common_opt="-pipe -Wall -Qunused-arguments" + debug_opt="-ggdb3 $common_opt" + release_opt="-g $common_opt $optimizing_flags -feliminate-unused-debug-symbols -fno-strict-aliasing" + cxx_opt="-Wno-invalid-offsetof -Qunused-arguments" + ]) + + AS_IF([test "x$ax_cv_c_compiler_vendor" = "xgnu"], [ + common_opt="-pipe -Wall" + debug_opt="-ggdb3 $common_opt" + release_opt="-g $common_opt $optimizing_flags -feliminate-unused-debug-symbols -fno-strict-aliasing" + cxx_opt="-Wno-invalid-offsetof" + ]) + + AS_IF([test -d /usr/local/lib], [ + TS_ADDTO(LDFLAGS, [-L/usr/local/lib]) + ]) + + ;; # freebsd|kfreebsd) + + solaris) + AS_IF([test "x$ax_cv_c_compiler_vendor" = "xgnu"], [ + common_opt="-pipe -Wall" + debug_opt="-ggdb3 $common_opt" + release_opt="-g $common_opt $optimizing_flags -feliminate-unused-debug-symbols -fno-strict-aliasing" + cxx_opt="-Wno-invalid-offsetof" + ]) + + ;; # solaris) + + *) + # Not sure what platform this is, but take a stab at some general GCC options ... + AS_IF([test "x$ax_cv_c_compiler_vendor" = "xgnu"], [ + common_opt="-pipe -Wall" + debug_opt="-ggdb3 $common_opt" + release_opt="-g $common_opt $optimizing_flags -feliminate-unused-debug-symbols -fno-strict-aliasing" + cxx_opt="-Wno-invalid-offsetof" + ]) +esac + +# Only add -Werror if the user has requested it. We enable this by default for +# development, disable it by default for release. 
+AS_IF([test x"$enable_werror" = xyes], [ + TS_ADDTO(release_opt, -Werror) + TS_ADDTO(debug_opt, -Werror) +]) + +cc_oflag_opt=$release_opt +cc_oflag_dbg=$debug_opt +cxx_oflag_opt="$release_opt $cxx_opt $cxx_rel" +cxx_oflag_dbg="$debug_opt $cxx_opt $cxx_dbg" + +AM_CFLAGS="$ts_am_common_flags" +AM_CXXFLAGS="$ts_am_common_flags" +AC_SUBST(AM_CFLAGS) +AC_SUBST(AM_CXXFLAGS) + +AC_SUBST([FLEX_CFLAGS], $flex_cflags) + +SHARED_CFLAGS=-fPIC +SHARED_LDFLAGS=-shared +SHARED_CXXFLAGS=-fPIC +SHARED_CXXLINKFLAGS=-shared + +# +# _Here_ is where we go ahead and add the _optimizations_ to already +# existing CFLAGS/CXXFLAGS if some special values had been set. +# +if test "x${enable_debug}" = "xyes"; then + TS_ADDTO(CFLAGS, [${cc_oflag_dbg}]) + TS_ADDTO(CXXFLAGS, [${cxx_oflag_dbg}]) + TS_ADDTO(CPPFLAGS, [-DDEBUG -D_DEBUG]) +else + TS_ADDTO(CFLAGS, [${cc_oflag_opt}]) + TS_ADDTO(CXXFLAGS, [${cxx_oflag_opt}]) +fi + +# Checks for pointer size +AC_CHECK_SIZEOF([void*]) +if test "x$ac_cv_sizeof_voidp" == "x"; then + AC_ERROR([Cannot determine size of void*]) +fi +AC_SUBST(ac_cv_sizeof_voidp) + + +# +# Here are all the extra linux-specific C(XX)FLAGS additions and +# so forth. +# TODO cpu architecture settings separate from operating system settings +# +cpu_architecture="" +# GCC: add a default march if there is not one set +if test "x${GCC}" = "xyes"; then + if test "${ac_cv_sizeof_voidp}" = "4"; then + case "$host_cpu" in + i?86* | k[5-8]* | pentium* | athlon) + cpu_architecture="-march=i586" + ;; + esac + else + case "$host_cpu" in + x86_64 | amd64) + # XXX: Any need for 64-bit arch flags? 
+ # cpu_architecture="-march=native" + ;; + esac + fi +fi + +# Overrride detected architecture with the user suplied one +# +AC_ARG_WITH(architecture, [AC_HELP_STRING([--with-architecture=ARCH],[use a specific CPU architecture])], +[ + if test "x$withval" != "xyes" && test "x$withval" != "xno"; then + case "$withval" in + -*) + # TODO: In case we are cross compiling some of the provided flags + # should be added to the LDFLAGS + cpu_architecture="$withval" + ;; + *) + cpu_architecture="-march=$withval" + ;; + esac + elif test "x$withval" = "x"; then + AC_MSG_ERROR([--with-architecture requires an param]) + fi +]) + +if test "x$cpu_architecture" != "x"; then + TS_ADDTO(CFLAGS, [$cpu_architecture]) + TS_ADDTO(CXXFLAGS, [$cpu_architecture]) +fi + +# 64-bit LFS support +# +TS_ADDTO(CPPFLAGS, [-D_LARGEFILE64_SOURCE=1]) +if test "${ac_cv_sizeof_voidp}" = "8"; then + TS_ADDTO(CPPFLAGS, [-D_COMPILE64BIT_SOURCE=1]) +else + TS_ADDTO(CPPFLAGS, [-D_FILE_OFFSET_BITS=64]) +fi +TS_ADDTO(CPPFLAGS, [-D_GNU_SOURCE]) +TS_ADDTO(CPPFLAGS, [-D_REENTRANT]) + + +AC_MSG_NOTICE([Build for host OS: $host_os, arch: $host_cpu, optimization: $host_os_def]) + +# +# Note: These are site-specific macro's that do various tests +# on the selected compilers. There was some tunning +# associated with our not wanting to use GNU for _everything_. +# Note: This macro may set certain parameters when run. +# + +# +# CPP 11 API +# TODO: briang, this needs to be translated to a define +# that can be used in the c++ api to enable c++11 api features. 
+# +#AC_MSG_CHECKING([whether to enable c++11 api]) +#AC_ARG_WITH([cpp11api], +# [AS_HELP_STRING([--with-cpp11api],[enable support for cpp11api [default=auto]])], +# [cpp11api=$withval], +# [cpp11api=$enable_cxx_11_support] +#) +#AC_MSG_RESULT([$cpp11api]) +#AM_CONDITIONAL([BUILD_CPP11API], [test "$cpp11api" = yes]) + +# Check for ccache (if explicitly enabled) +if test "x$enable_ccache" = "xyes"; then + AC_CHECK_PROG([CCACHE],[ccache],[ccache],[]) + if test "x${CCACHE}" = "xccache"; then + CC="$CCACHE $CC" + CXX="$CCACHE $CXX" + fi +fi + +# ----------------------------------------------------------------------------- +# 4. CHECK FOR LIBRARIES + +AC_SEARCH_LIBS([exc_capture_context], [exc], [], []) +AC_SEARCH_LIBS([MLD_demangle_string], [mld], [], []) +AC_SEARCH_LIBS([dlopen], [dl], [], []) +AC_SEARCH_LIBS([socket], [socket], [], []) +AC_SEARCH_LIBS([gethostbyname], [nsl], [], []) + +dnl Linux has pthread symbol stubss in both libc ad libpthread, so it's important to test +dnl specifically for pthread_create() here. +AC_SEARCH_LIBS([pthread_create], [pthread], [], []) +AC_SEARCH_LIBS([clock_gettime], [rt], [], []) +AC_SEARCH_LIBS([clock_gettime], [posix4], [], []) + +dnl XXX The following check incorrectly causes the build to succeed +dnl on Darwin. We should be using AC_SEARCH_LIBS, but rest_init is +dnl actually present in libsystem. We are searching for the library +dnl that contains the full Bind 9 API (which is acutally libresolv). +dnl However, the resolv API uses macros to rename it's APIs to per-version +dnl symbols, so standard autoconf macros cannot reasonably be used to +dnl check for it. We need to write custom macros to detect it properly. +AC_CHECK_LIB([resolv],[res_init],[AC_SUBST([LIBRESOLV],["-lresolv"])]) +AC_CHECK_LIB([resolv],[__putlong],[AC_SUBST([LIBRESOLV],["-lresolv"])]) + +# Test for ncurses. We need to turn off -Werror because the C code in the +# ncurses compile tests does not generate unused variable warnings. 
+__saved_CFLAGS="$CFLAGS" +TS_REMOVEFROM(CFLAGS, -Werror) +AX_WITH_CURSES +CFLAGS="$__saved_CFLAGS" + +AX_LIB_CURL([7.19], [ + AM_CONDITIONAL([BUILD_TSTOP], [test "x$ax_cv_curses" = "xyes"]) + ], [ + AM_CONDITIONAL([BUILD_TSTOP], [false]) + ] +) + +# +# The header_rewrite module depends on boost. +AX_BOOST_BASE([1.33], + [ AM_CONDITIONAL([BUILD_HAVE_BOOST], [true]) ], + [ AM_CONDITIONAL([BUILD_HAVE_BOOST], [false]) ] +) + +# +# Check for SSL presence and usability +TS_CHECK_CRYPTO + +# +# Check for NextProtocolNegotiation TLS extension support. +TS_CHECK_CRYPTO_NEXTPROTONEG + +# +# Check for ALPN TLS extension support. +TS_CHECK_CRYPTO_ALPN +# +# Check for EC key support. +TS_CHECK_CRYPTO_EC_KEYS + +# +# Check for ServerNameIndication TLS extension support. +TS_CHECK_CRYPTO_SNI + +# +# Check for zlib presence and usability +TS_CHECK_ZLIB + +# +# Check for lzma presence and usability +TS_CHECK_LZMA + +# +# Tcl macros provided by build/tcl.m4 +# +# this will error out if tclConfig.sh is not found +SC_PATH_TCLCONFIG + +# if tclConfig.sh loads properly, assume libraries are there and working +SC_LOAD_TCLCONFIG + +# expect tclConfig.sh to populate TCL_LIB_FLAG and TCL_INCLUDE_SPEC +if test "$host_os_def" == "darwin"; then + TCL_LIB_SPEC="-ltcl" # OSX fails to populate this variable +fi +AC_SUBST([LIBTCL],[$TCL_LIB_SPEC]) + + +if test "x${TCL_INCLUDE_SPEC}" != "x-I/usr/include"; then + TS_ADDTO(CPPFLAGS, [$TCL_INCLUDE_SPEC]) +fi + +# +# Check for XML parser +# +TS_CHECK_XML +if test "x${enable_xml}" != "xyes"; then + AC_MSG_ERROR([Need at least one XML library, --with-expat is supported]) +fi + +AC_CHECK_FUNCS([clock_gettime kqueue epoll_ctl posix_memalign posix_fadvise posix_madvise posix_fallocate inotify_init]) +AC_CHECK_FUNCS([lrand48_r srand48_r port_create strlcpy strlcat sysconf getpagesize]) + +# Check for eventfd() and sys/eventfd.h (both must exist ...) 
+TS_FLAG_HEADERS([sys/eventfd.h], [ + AS_IF([test "x$enable_eventfd" = "xyes"], [ + AC_CHECK_FUNCS([eventfd]) + ] +)]) + +AC_CHECK_FUNCS(eventfd) + +# +# Check for mcheck_pedantic(3) +# +TS_FLAG_HEADERS(mcheck.h) +AC_CHECK_FUNCS(mcheck_pedantic) + +# +# Check for pcre library +# +TS_CHECK_PCRE +if test "x${enable_pcre}" != "xyes"; then + AC_MSG_ERROR([Cannot find pcre library. Configure --with-pcre=DIR]) +fi + +has_backtrace=0 +# Check for backtrace() support +TS_FLAG_HEADERS([execinfo.h], [has_backtrace=1],[]) +if test "${has_backtrace}" = "1"; then + # FreeBSD requires '/usr/ports/devel/libexecinfo' for gdb style backtrace() support + AC_SEARCH_LIBS([backtrace], [execinfo], [have_backtrace_lib=yes]) +else + AC_MSG_WARN([No backtrace() support found]) +fi +AC_SUBST(execinfoh) +AC_SUBST(has_backtrace) + +use_epoll=0 +use_kqueue=0 +use_port=0 +if test "$ac_cv_func_epoll_ctl" = "yes"; then + use_epoll=1 + have_good_poller=1 + AC_MSG_NOTICE([Using epoll event interface]) +elif test "$ac_cv_func_kqueue" = "yes"; then + use_kqueue=1 + have_good_poller=1 + AC_MSG_NOTICE([Using kqueue event interface]) +elif test "$ac_cv_func_port_create" = "yes"; then + use_port=1 + have_good_poller=1 + AC_MSG_NOTICE([Using port event interface]) +else + AC_MSG_FAILURE([No suitable polling interface found]) +fi +AC_SUBST(use_epoll) +AC_SUBST(use_kqueue) +AC_SUBST(use_port) + +has_profiler=0 +if test "x${with_profiler}" = "xyes"; then + AC_SEARCH_LIBS([ProfilerStart], [profiler], + [AC_SUBST([LIBPROFILER], ["-lprofiler"]) + has_profiler=1 + ], + [AC_MSG_FAILURE([check for profiler failed. Have you installed google-perftools-devel?])], + ) +fi +AC_SUBST(has_profiler) + +# For SunPro 5.2 - we need the demangle symbol for +# ink_stack_trace.cc in lib/ts. 
Because this +# library is part of the SunPro distribution, we need +# check with the C++ compiler since we might be using +# SunPro CC for the C++ compiler and gcc for the C compiler +AC_LANG_PUSH([C++]) +AC_SEARCH_LIBS([cplus_demangle],[demangle], + [LIBDEMANGLE="-ldemangle" + ] +) +AC_SUBST([LIBDEMANGLE]) +AC_LANG_POP + +AC_MSG_CHECKING(for 128bit CAS support) +AC_LANG_PUSH([C++]) + +# We need to save and restore compiler flags around this whole block. +# TS_TRY_COMPILE_NO_WARNING will save and restore flags, so if we do that in the +# middle, then we can accidentally restore modified flags. +__saved_CXXFLAGS="${CXXFLAGS}" +__saved_CFLAGS="${CFLAGS}" + +has_128bit_cas=0 + +TS_TRY_COMPILE_NO_WARNING([],[ + __int128_t x = 0; + __sync_bool_compare_and_swap(&x,0,10); + ], [ + AC_MSG_RESULT(yes) + has_128bit_cas=1 + ], [ + dnl If 128bit CAS fails, try again with the -mcx16 option. GCC needs this; + dnl clang doesn't; icc is unknown but presumed sane. + TS_ADDTO(CXXFLAGS, [-mcx16]) + TS_ADDTO(CFLAGS, [-mcx16]) + TS_TRY_COMPILE_NO_WARNING([],[ + __int128_t x = 0; + __sync_bool_compare_and_swap(&x,0,10); + ], [ + AC_MSG_RESULT(yes) + has_128bit_cas=1 + ], [ + AC_MSG_RESULT(no) + ]) +]) + +CXXFLAGS="${__saved_CXXFLAGS}" +CFLAGS="${__saved_CFLAGS}" +AC_LANG_POP +AC_SUBST(has_128bit_cas) + +AS_IF([test "x$has_128bit_cas" = "x1"], [ + TS_ADDTO(CFLAGS, [-mcx16]) + TS_ADDTO(CXXFLAGS, [-mcx16]) +]) + +# Check for POSIX capabilities library. +# If we don't find it, disable checking for header. +use_posix_cap=0 +AS_IF([test "x$enable_posix_cap" != "xno"], + AC_SEARCH_LIBS([cap_set_proc],[cap], + [AC_SUBST([LIBCAP], ["-lcap"]) + use_posix_cap=1 + ],[ + AS_IF([test "x$enable_posix_cap" == "xyes"], [ + AC_MSG_FAILURE([POSIX capabilities enabled but system library not found.]) + ],[ + [enable_posix_cap=no] + ] ) + ] + ) +) +AC_SUBST(use_posix_cap) + +# +# If the OS is linux, we can use the '--enable-linux-native-aio' option to +# replace the aio thread mode. 
Effective only on the linux system. +# + +AC_MSG_CHECKING([whether to enable Linux native AIO]) +AC_ARG_ENABLE([linux-native-aio], + [AS_HELP_STRING([--enable-linux-native-aio], [enable native Linux AIO support @<:@default=no@:>@])], + [enable_linux_native_aio="${enableval}"], + [enable_linux_native_aio=no] +) + +AS_IF([test "x$enable_linux_native_aio" = "xyes"], [ + if test $host_os_def != "linux"; then + AC_MSG_ERROR([Linux native AIO can only be enabled on Linux systems]) + fi + + AC_CHECK_HEADERS([libaio.h], [], + [AC_MSG_ERROR([Linux native AIO requires libaio.h])] + ) + + AC_SEARCH_LIBS([io_submit], [aio], [], + [AC_MSG_ERROR([Linux native AIO requires libaio])] + ) + +]) + +AC_MSG_RESULT([$enable_linux_native_aio]) +TS_ARG_ENABLE_VAR([use], [linux_native_aio]) +AC_SUBST(use_linux_native_aio) + +# Check for hwloc library. +# If we don't find it, disable checking for header. +use_hwloc=0 +AS_IF([test "x$enable_hwloc" = "xyes"], [ + # Use pkg-config, because some distros (*cough* Ubuntu) put hwloc in unusual places. + PKG_CHECK_MODULES([HWLOC], [hwloc], [ + SAVE_LIBS="$LIBS" + LIBS="-lhwloc" + AC_LANG_PUSH([C++]) + AC_MSG_CHECKING([for hwloc C++ linking]) + AC_LINK_IFELSE([ + AC_LANG_PROGRAM([#include ],[hwloc_topology_t t; hwloc_topology_init(&t); hwloc_get_type_depth(t, HWLOC_OBJ_SOCKET);])],[ + use_hwloc=1 + AC_SUBST([HWLOC_CFLAGS]) + AC_SUBST([HWLOC_LIBS]) + AC_MSG_RESULT([yes]) + # Old versions of libhwloc don't have HWLOC_OBJ_PU. + AC_CHECK_DECL(HWLOC_OBJ_PU, + [AC_DEFINE(HAVE_HWLOC_OBJ_PU, 1, [Whether HWLOC_OBJ_PU is available])], [], + [#include ] + ) + ], [ + AC_MSG_RESULT([no]) + AC_MSG_WARN([hwloc not linkable, try --disable-hwloc]) + AC_SUBST([HWLOC_CFLAGS],[""]) + AC_SUBST([HWLOC_LIBS],[""]) + ]) + AC_LANG_POP() + LIBS="$SAVE_LIBS" + ], [ + AC_MSG_WARN([hwloc not found, try --disable-hwloc]) + AC_SUBST([HWLOC_CFLAGS],[""]) + AC_SUBST([HWLOC_LIBS],[""]) + ]) +]) + +AC_SUBST(use_hwloc) + +# +# Check for Maxmind APIs / includes. 
TODO: Long term, it might make sense to support +# GeoIP as a "helper" plugin, which other plugins can then use. Such a plugin could +# then manage which libraries to use via explicit dlopen()'s. +# +enable_maxmind_geoip=no +TS_SEARCH_LIBRARY([GeoIP_id_by_code], [GeoIP], [ + GEOIP_LIBS=$ts_cv_search_GeoIP_id_by_code + TS_FLAG_HEADERS([GeoIP.h], [ enable_maxmind_geoip=yes ]) +]) + +AC_SUBST(GEOIP_LIBS) +AM_CONDITIONAL([BUILD_GEOIP_PLUGIN], [ test "x${enable_maxmind_geoip}" = x"yes" ]) + +# Right now, the healthcheck plugins requires inotify_init (and friends) +AM_CONDITIONAL([BUILD_HEALTHCHECK_PLUGIN], [ test "$ac_cv_func_inotify_init" = "yes" ]) + +# +# Check for tcmalloc and jemalloc +TS_CHECK_JEMALLOC +TS_CHECK_TCMALLOC + +# +# Check for libreadline/libedit +AX_LIB_READLINE + +# On Darwin LuaJIT requires magic link options, otherwise it will crash in luaL_openlibs() at startup. See +# http://luajit.org/install.html. +case $host_os_def in + darwin) + TS_LUAJIT_LDFLAGS="-Wl,-pagezero_size,10000 -Wl,-image_base,100000000" + ;; +esac + +AC_SUBST(TS_LUAJIT_LDFLAGS) +AC_SUBST(LUA_LDFLAGS) + +# We should be able to build http_load if epoll(2) is available. +AM_CONDITIONAL([BUILD_HTTP_LOAD], [test x"$ac_cv_func_epoll_ctl" = x"yes"]) + +# Check for spdylay library +if test "x${enable_spdy}" = "xyes"; then + PKG_CHECK_MODULES([SPDYLAY],[libspdylay]) +fi + +# ----------------------------------------------------------------------------- +# 5. 
CHECK FOR HEADER FILES + +TS_FLAG_HEADERS([sys/types.h sys/mman.h]) +TS_FLAG_HEADERS([sys/epoll.h \ + sys/event.h \ + sys/param.h \ + sys/pset.h \ + sched.h \ + pthread.h \ + pthread_np.h \ + machine/endian.h \ + endian.h \ + sys/sysinfo.h \ + sys/systeminfo.h \ + netinet/in.h \ + netinet/in_systm.h \ + netinet/tcp.h \ + sys/ioctl.h \ + sys/byteorder.h \ + sys/sockio.h \ + sys/prctl.h \ + arpa/nameser.h \ + arpa/nameser_compat.h \ + execinfo.h \ + netdb.h \ + ctype.h \ + siginfo.h \ + malloc.h \ + wait.h \ + float.h \ + libgen.h \ + values.h \ + alloca.h \ + cpio.h \ + stropts.h \ + sys/param.h \ + sys/sysmacros.h \ + math.h \ + stdint.h \ + stdbool.h \ + net/ppp_defs.h \ + ifaddrs.h\ + readline/readline.h \ + editline/readline.h ]) + +AC_SUBST(sys_epollh) +AC_SUBST(sys_eventh) +AC_SUBST(machine_endianh) +AC_SUBST(endianh) +AC_SUBST(pthread_nph) +AC_SUBST(sys_paramh) +AC_SUBST(sys_cpuseth) +AC_SUBST(sys_pseth) +AC_SUBST(schedh) +AC_SUBST(netinet_inh) +AC_SUBST(netinet_in_systmh) +AC_SUBST(netinet_tcph) +AC_SUBST(sys_ioctlh) +AC_SUBST(sys_byteorderh) +AC_SUBST(sys_sockioh) +AC_SUBST(sys_sysctlh) +AC_SUBST(sys_sysinfoh) +AC_SUBST(sys_systeminfoh) +AC_SUBST(arpa_ineth) +AC_SUBST(arpa_nameserh) +AC_SUBST(arpa_nameser_compath) +AC_SUBST(execinfoh) +AC_SUBST(netdbh) +AC_SUBST(ctypeh) + +AC_SUBST(siginfoh) +AC_SUBST(malloch) +AC_SUBST(waith) +AC_SUBST(floath) +AC_SUBST(libgenh) +AC_SUBST(valuesh) +AC_SUBST(allocah) +AC_SUBST(cpioh) +AC_SUBST(stroptsh) +AC_SUBST(sys_mounth) +AC_SUBST(sys_paramh) +AC_SUBST(sys_sysmacrosh) +AC_SUBST(mathh) +AC_SUBST(net_ppp_defsh) +AC_SUBST(ifaddrsh) +AC_SUBST(readline_readlineh) + +TS_FLAG_HEADERS([sys/statfs.h sys/statvfs.h sys/disk.h sys/disklabel.h]) + +TS_FLAG_HEADERS([sys/sysctl.h], [], [], + [[#ifdef HAVE_SYS_PARAM_H + #include + #endif + ]]) + +TS_FLAG_HEADERS([sys/cpuset.h], [], [], + [[#ifdef HAVE_SYS_PARAM_H + #include + #endif + ]]) + +TS_FLAG_HEADERS([sys/mount.h], [], [], + [[#ifdef HAVE_SYS_PARAM_H + #include + #endif + ]]) + 
+TS_FLAG_HEADERS([arpa/inet.h], [], [], + [[#ifdef HAVE_SYS_TYPES_H + #include + #endif + #ifdef HAVE_NETINET_IN_H + #include + #endif + ]]) + +TS_FLAG_HEADERS([netinet/ip.h], [], [], + [[#ifdef HAVE_SYS_TYPES_H + #include + #endif + #ifdef HAVE_NETINET_IN_H + #include + #endif + #ifdef HAVE_NETINET_IN_SYSTM_H + #include + #endif + ]]) + +TS_FLAG_HEADERS([netinet/ip_icmp.h], [], [], + [[#ifdef HAVE_SYS_TYPES_H + #include + #endif + #ifdef HAVE_NETINET_IN_H + #include + #endif + #ifdef HAVE_NETINET_IP_H + #include + #endif + #ifdef HAVE_NETINET_IN_SYSTM_H + #include + #endif + ]]) + +AC_SUBST(netinet_iph) +AC_SUBST(netinet_ip_icmph) + +# Test for additional pthread interfaces. + +# Darwin pthread_setname_np: +AC_MSG_CHECKING([for 1-parameter version of pthread_setname_np()]) +AC_LINK_IFELSE([ + AC_LANG_PROGRAM([ +#if HAVE_PTHREAD_H +#include +#endif +#if PTHREAD_NP_H +#include +#endif + ], [ + pthread_setname_np("conftest"); + ]) + ], [ + AC_DEFINE(HAVE_PTHREAD_SETNAME_NP_1, 1, [Whether the 1 parameter version of pthread_setname_np() is available]) + AC_MSG_RESULT([yes]) + ], [ + AC_MSG_RESULT([no]) +]) + +# Linux pthread_setname_np: +AC_MSG_CHECKING([for 2-parameter version of pthread_setname_np()]) +AC_LINK_IFELSE([ + AC_LANG_PROGRAM([ +#if HAVE_PTHREAD_H +#include +#endif +#if PTHREAD_NP_H +#include +#endif + ], [ + pthread_setname_np(pthread_self(), "conftest"); + ]) + ], [ + AC_DEFINE(HAVE_PTHREAD_SETNAME_NP_2, 1, [Whether the 2 parameter version of pthread_setname_np() is available]) + AC_MSG_RESULT([yes]) + ], [ + AC_MSG_RESULT([no]) +]) + +# BSD pthread_set_name_np: +AC_MSG_CHECKING([for 2-parameter version of pthread_set_name_np()]) +AC_LINK_IFELSE([ + AC_LANG_PROGRAM([ +#if HAVE_PTHREAD_H +#include +#endif +#if PTHREAD_NP_H +#include +#endif + ], [ + pthread_set_name_np(pthread_self(), "conftest"); + ]) + ], [ + AC_DEFINE(HAVE_PTHREAD_SET_NAME_NP_2, 1, [Whether the 2 parameter version of pthread_set_name_np() is available]) + AC_MSG_RESULT([yes]) + ], [ + 
AC_MSG_RESULT([no]) +]) + +# BSD-derived systems populate the socket length in the structure itself. It's +# redundant to check all of these, but hey, I need the typing practice. +AC_CHECK_MEMBER([struct sockaddr.sa_len], [], [], [#include ]) +AC_CHECK_MEMBER([struct sockaddr_in.sin_len], [], [], [#include ]) +AC_CHECK_MEMBER([struct sockaddr_in6.sin6_len], [], [], [#include ]) + +if test "x${ac_cv_member_struct_sockaddr_sa_len}" = xyes; then + AC_DEFINE(HAVE_STRUCT_SOCKADDR_SA_LEN, 1, + [Whether struct sockaddr_in has the sa_len member]) +fi + +if test "x${ac_cv_member_struct_sockaddr_in_sin_len}" = xyes ; then +AC_DEFINE(HAVE_STRUCT_SOCKADDR_IN_SIN_LEN, 1, + [Whether struct sockaddr_in has the sin_len member]) +fi + +if test "x${ac_cv_member_struct_sockaddr_in6_sin6_len}" = xyes; then + AC_DEFINE(HAVE_STRUCT_SOCKADDR_IN6_SIN6_LEN, 1, + [Whether struct sockaddr_in6 has the sin6_len member]) +fi + +if test "x${with_profiler}" = "xyes"; then +TS_FLAG_HEADERS([google/profiler.h \ + ], [], []) +fi + +if test "x${enable_posix_cap}" != "xno"; then + AC_CHECK_HEADERS([sys/capability.h], + [], + [AC_MSG_FAILURE([Found POSIX capabilities library but not the header sys/capability.h. POSIX capabilities are not a required feature, you can disable then with --disable-posix-cap])], + [] + ) +fi + +# Check for high-resolution timestamps in struct stat +AC_CHECK_MEMBERS([struct stat.st_mtimespec.tv_nsec]) +AC_CHECK_MEMBERS([struct stat.st_mtim.tv_nsec]) + + +# +# Configure sockopt value for TPROXY. Look at the enable flag. +# Value 'no' means user forced disable, don't check anything else. +# 'auto' means user didn't say, so silently enable/disable +# based on success. +# A numeric value means enable, don't check, use that value. +# Anything else means user forced, fail if value not found +# in header file. +# We can't just include linux/in.h because it's incompatible with +# netinet/in.h. 
+# Verify the file exists (is readable), scan for the value we need, +# if found export the value and enable use of the value. +# +ip_transparent=0 +use_tproxy=0 +tproxy_header=/usr/include/linux/in.h +tproxy_usage_enable=" + --enable-tproxy Enable the feature and validate." +tproxy_usage_default=" + --enable-tproxy=force Enable using default sockopt value, no validation." +tproxy_usage_numeric=" + --enable-tproxy=X where X is numeric + Enable, use X for sockopt value, no validation." +tproxy_usage_disable=" + --disable-tproxy Disable feature, no validation." +proxy_usage="$tproxy_usage_enable$tproxy_usage_default$tproxy_usage_numeric$tproxy_usage_disable" + +AC_MSG_CHECKING([whether to enable transparent proxy]) +AS_IF([test "x$enable_tproxy" != "xno"], [ + AS_IF([test "$use_posix_cap" -eq 0], [ + AS_IF([test "x$enable_tproxy" = xauto], [ + AC_MSG_RESULT([no]) + ],[ + AC_MSG_FAILURE([TPROXY feature requires POSIX capabilities.]) + ]) + ],[ + AC_MSG_CHECKING([for TPROXY sockopt IP_TRANSPARENT]) + case "$enable_tproxy" in + [[0-9][0-9]*]) + ip_transparent=$enable_tproxy + use_tproxy=1 + AC_MSG_RESULT([forced to $ip_transparent]) + ;; + force) + ip_transparent=19 + use_tproxy=1 + AC_MSG_RESULT([forced to $ip_transparent]) + ;; + yes|auto) + AS_IF([test -r $tproxy_header], [ + ip_transparent=`$AWK "/^#define[ \t]+IP_TRANSPARENT[ \t]+[0-9]+/{print \\$3}" $tproxy_header` + AS_IF([test "x$ip_transparent" != "x"], [ + use_tproxy=1 + AC_MSG_RESULT([set to $ip_transparent]) + ],[ + ip_transparent=0 + AS_IF([test "x$enable_tproxy" = xauto], [ + AC_MSG_RESULT([no]) + ],[ + AC_MSG_RESULT([failed]) + AC_MSG_FAILURE([tproxy feature enabled but the sockopt value was not found in $tproxy_header. Try one of$tproxy_usage_default$tproxy_usage_numeric$tproxy_usage_disable]) + ]) + ]) + ],[ + AS_IF([test "x$enable_tproxy" = xauto], [ + AC_MSG_RESULT([no]) + ],[ + AC_MSG_RESULT([failed]) + AC_MSG_FAILURE([tproxy feature enabled but the header file $tproxy_header was not readable. 
Try one of$tproxy_usage_default$tproxy_usage_numeric$tproxy_usage_disable]) + ]) + ]) + ;; + *) + AC_MSG_RESULT([failed]) + AC_MSG_FAILURE([Invalid argument to feature tproxy.$tproxy_usage]) + ;; + esac + ]) +]) + +AC_SUBST(use_tproxy) +AC_SUBST(ip_transparent) + +AC_MSG_CHECKING([for SO_MARK]) +AC_TRY_COMPILE([ +#include +], [ +if( SO_MARK > 0) return 0; +else return 1; +], [has_so_mark=1] +msg=yes, [ +has_so_mark=0 +msg=no ] ) + +AC_MSG_RESULT([$msg]) +AC_SUBST(has_so_mark) + +AC_MSG_CHECKING([for IP_TOS]) +AC_TRY_COMPILE([ +#include +#include +#include +],[ +if( IP_TOS > 0) return 0; +else return 1; +], [has_ip_tos=1] +msg=yes, [ +has_ip_tos=0 +msg=no ] ) + +AC_MSG_RESULT([$msg]) +AC_SUBST(has_ip_tos) + +TS_CHECK_LOOPBACK_IFACE +TS_CHECK_GETHOSTBYNAME_R_STYLE +TS_CHECK_MACRO_IN6_IS_ADDR_UNSPECIFIED + +AC_CHECK_TYPE([struct tcp_info], + [AC_DEFINE(HAVE_STRUCT_TCP_INFO, 1, [whether struct tcp_info is available])], + [], + [[ + #include + #include + ]] +) + +# See if we can build the remap_stats plugin +AS_IF([test "x$enable_experimental_plugins" = xyes], + [ + AC_CHECK_HEADERS([search.h]) + AS_IF([test "x$ac_cv_header_search_h" = "xyes"], + [ + AC_CHECK_TYPE([struct hsearch_data],[],[],[[#include ]]) + AC_CHECK_FUNCS([hcreate_r hsearch_r]) + ]) + ]) +AM_CONDITIONAL([BUILD_REMAP_STATS_PLUGIN], + [ test "x$enable_experimental_plugins" = "xyes" -a "x$ac_cv_header_search_h" = "xyes" -a "x$ac_cv_type_struct_hsearch_data" = "xyes" -a "x$ac_cv_func_hcreate_r" = "xyes" -a "x$ac_cv_func_hsearch_r" = "xyes" ]) + +AM_CONDITIONAL([BUILD_COLLAPSED_CONNECTION_PLUGIN], + [ test "x$enable_experimental_plugins" = "xyes" -a "x${enable_collapsed_connection_plugin}" != "xno" ]) + +# +# use modular IOCORE +# +iocore_include_dirs="-I\$(top_srcdir)/iocore/eventsystem \ +-I\$(top_srcdir)/iocore/net \ +-I\$(top_srcdir)/iocore/aio \ +-I\$(top_srcdir)/iocore/hostdb \ +-I\$(top_srcdir)/iocore/cache \ +-I\$(top_srcdir)/iocore/cluster \ +-I\$(top_srcdir)/iocore/utils \ 
+-I\$(top_srcdir)/iocore/dns" + +# Testing Framework suffix generation hack +TFW_PACKAGE_SUFFIX=$os_type + +MGMT_DEFS="$MGMT_DEFS -DMGMT_USE_SYSLOG" + +AC_SUBST([API_DEFS]) +AC_SUBST([CC]) +AC_SUBST([CFLAGS]) +AC_SUBST([CXX]) +AC_SUBST([CXXFLAGS]) +AC_SUBST([EXPAT_LDFLAGS]) +AC_SUBST([EXTRA_CC_LDFLAGS]) +AC_SUBST([EXTRA_CXX_LDFLAGS]) +AC_SUBST([LIBTOOL_LINK_FLAGS]) +AC_SUBST([iocore_include_dirs]) +AC_SUBST([LDFLAGS]) +AC_SUBST([MGMT_DEFS]) +AC_SUBST([SHARED_CFLAGS]) +AC_SUBST([SHARED_CXXFLAGS]) +AC_SUBST([SHARED_CXXLINKFLAGS]) +AC_SUBST([SHARED_LDFLAGS]) +AC_SUBST([TFW_PACKAGE_SUFFIX]) + +# ----------------------------------------------------------------------------- +# 6. OUTPUT FILES + +AC_CONFIG_FILES([ + Makefile + cmd/Makefile + cmd/traffic_cop/Makefile + cmd/traffic_line/Makefile + cmd/traffic_top/Makefile + doc/Doxyfile + doc/Makefile + example/Makefile + iocore/Makefile + iocore/aio/Makefile + iocore/cache/Makefile + iocore/cluster/Makefile + iocore/dns/Makefile + iocore/eventsystem/Makefile + iocore/hostdb/Makefile + iocore/net/Makefile + iocore/utils/Makefile + lib/Makefile + lib/perl/Makefile + lib/perl/lib/Apache/TS.pm + lib/records/Makefile + lib/ts/Makefile + lib/ts/apidefs.h + lib/ts/ink_config.h + lib/tsconfig/Makefile + lib/wccp/Makefile + mgmt/Makefile + mgmt/api/Makefile + mgmt/api/include/Makefile + mgmt/cluster/Makefile + mgmt/stats/Makefile + mgmt/utils/Makefile + mgmt/web2/Makefile + plugins/Makefile + plugins/cacheurl/Makefile + plugins/conf_remap/Makefile + plugins/gzip/Makefile + plugins/header_rewrite/Makefile + plugins/libloader/Makefile + plugins/regex_remap/Makefile + plugins/stats_over_http/Makefile + plugins/tcpinfo/Makefile + proxy/Makefile + proxy/api/ts/Makefile + proxy/config/Makefile + proxy/config/body_factory/Makefile + proxy/config/body_factory/default/Makefile + proxy/config/records.config.default + proxy/config/storage.config.default + proxy/congest/Makefile + proxy/hdrs/Makefile + proxy/http/Makefile + 
proxy/http/remap/Makefile + proxy/logging/Makefile + proxy/shared/Makefile + proxy/spdy/Makefile + rc/Makefile + rc/trafficserver + rc/trafficserver.conf + rc/trafficserver.service + rc/trafficserver.xml + tools/Makefile + tools/tsxs +]) + +AS_IF([test "x$enable_experimental_plugins" = xyes], [ + AC_CONFIG_FILES([ + plugins/experimental/Makefile + plugins/experimental/authproxy/Makefile + plugins/experimental/background_fetch/Makefile + plugins/experimental/balancer/Makefile + plugins/experimental/buffer_upload/Makefile + plugins/experimental/channel_stats/Makefile + plugins/experimental/collapsed_connection/Makefile + plugins/experimental/custom_redirect/Makefile + plugins/experimental/escalate/Makefile + plugins/experimental/esi/Makefile + plugins/experimental/geoip_acl/Makefile + plugins/experimental/healthchecks/Makefile + plugins/experimental/hipes/Makefile + plugins/experimental/metalink/Makefile + plugins/experimental/remap_stats/Makefile + plugins/experimental/regex_revalidate/Makefile + plugins/experimental/s3_auth/Makefile + plugins/experimental/stale_while_revalidate/Makefile + plugins/experimental/ts_lua/Makefile + plugins/experimental/url_sig/Makefile + plugins/experimental/xdebug/Makefile +])]) + +AS_IF([test "x$enable_cppapi" = xyes], [ + AC_CONFIG_FILES([ + lib/atscppapi/Makefile + lib/atscppapi/examples/Makefile + lib/atscppapi/examples/async_http_fetch/Makefile + lib/atscppapi/examples/async_timer/Makefile + lib/atscppapi/examples/clientredirect/Makefile + lib/atscppapi/examples/clientrequest/Makefile + lib/atscppapi/examples/customresponse/Makefile + lib/atscppapi/examples/boom/Makefile + lib/atscppapi/examples/globalhook/Makefile + lib/atscppapi/examples/gzip_transformation/Makefile + lib/atscppapi/examples/helloworld/Makefile + lib/atscppapi/examples/intercept/Makefile + lib/atscppapi/examples/internal_transaction_handling/Makefile + lib/atscppapi/examples/logger_example/Makefile + lib/atscppapi/examples/multiple_transaction_hooks/Makefile + 
lib/atscppapi/examples/null_transformation_plugin/Makefile + lib/atscppapi/examples/post_buffer/Makefile + lib/atscppapi/examples/remap_plugin/Makefile + lib/atscppapi/examples/serverresponse/Makefile + lib/atscppapi/examples/stat_example/Makefile + lib/atscppapi/examples/timeout_example/Makefile + lib/atscppapi/examples/transactionhook/Makefile + lib/atscppapi/src/Makefile +])]) + + +# ----------------------------------------------------------------------------- +# 7. autoheader TEMPLATES + +AC_OUTPUT + +AC_MSG_NOTICE([Build option summary: + CC: $CC + CXX: $CXX + CPP: $CPP + CFLAGS: $CFLAGS + SHARED_CFLAGS: $SHARED_CFLAGS + CXXFLAGS: $CXXFLAGS + SHARED_CXXFLAGS: $SHARED_CXXFLAGS + SHARED_CXXLINKFLAGS:$SHARED_LINKCXXFLAGS + CPPFLAGS: $CPPFLAGS + LDFLAGS: $LDFLAGS + SHARED_LDFLAGS: $SHARED_LDFLAGS + EXTRA_CC_LDFLAGS: $EXTRA_CC_LDFLAGS + EXTRA_CXX_LDFLAGS: $EXTRA_CXX_LDFLAGS + LIBTOOL_LINK_FLAGS: $LIBTOOL_LINK_FLAGS + MGMT_DEFS: $MGMT_DEFS + API_DEFS: $API_DEFS +]) diff --git a/contrib/install_trafficserver.sh b/contrib/install_trafficserver.sh new file mode 100644 index 00000000..d7e0ad7c --- /dev/null +++ b/contrib/install_trafficserver.sh @@ -0,0 +1,401 @@ +#!/bin/bash +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# +# Quick Build/Test for TrafficServer +# Tested on Ubuntu Karmic 9.10, EC2 Friendly with official Canonical AMIs +# Author: Jason Giedymin +# Desc: The intent with this script is to unify a single build script with +# standard options for testing. +# +# Version Information: +# v0.1.1a - Added Debug +# v1.0.0 - Stable Release +# v1.0.1 - Added sqlite dev lib. +# v1.0.2 - EC2 Note about ephemeral storage +# - Create ephemeral storage by mimicking on non EC2 systems. +# v1.0.3 - Added fedora to list of supported distros +# v1.0.4 - Added fedora EC2 compatibility +# - Added EC2 detection, option + +# It's safe to use this in a non-ec2 environment. This directory will be +# created if it doesn't exist. If you're running this on EC2, it's best not +# to change this location as this is the ephemeral drive setup by Amazon. +# For non-ec2 environments, you may change this to any location you +# desire. +EC2_EPHEMERAL=/mnt +PROJECT=trafficserver + +BRANCH=traffic/trunk +SVN_LOC=traffic-trunk.svn +SVN_HOME=http://svn.apache.org/repos/asf/incubator + +FALSE=0 +TRUE=1 +DEBUG=$FALSE +USING_EC2=$FALSE +BUILD_HOME=/usr/local +PREFIX=--prefix=$BUILD_HOME +BUILD_OPTIONS="$PREFIX" +BUILD_OPTIONS_DEBUG="--with-user=root --with-group=root --enable-debug" +BUILD_OPTIONS_FC8_EC2="--disable-eventfd" +CONFIGURE_OPTIONS="" +FULL_BUILD_PATH=$EC2_EPHEMERAL/$PROJECT/$SVN_LOC + +SUSE="suse" +FEDORA="fedora" +REDHAT="redhat" # also exists on Fedora +UBUNTU="ubuntu" +DEBIAN="debian" # also exists on Ubuntu +SLACKWARE="slackware" + + +function killAll() { + killall traffic_cop + killall traffic_manager + killall traffic_server +} + +function updateInstall() { + if [ "$DISTRIB_ID" = "$UBUNTU" ]; then + apt-get update + apt-get install -y g++ autoconf \ + make \ + libtool \ + libssl-dev \ + tcl-dev \ + libexpat1-dev \ + libpcre3-dev \ + curl + apt-get install -y subversion git git-svn + elif [ "$DISTRIB_ID" = "$FEDORA" ]; then + yum update + yum upgrade + + # Chose not to use kernel version here 
as FC8 xen needs more than just eventfd work + if [ $USING_EC2 = $TRUE ]; then + yum -y install subversion \ + git \ + autoconf \ + automake \ + libtool \ + gcc-c++ \ + glibc-devel \ + openssl-devel \ + tcl-devel \ + expat-devel \ + db4-devel \ + pcre \ + pcre-devel + elif [ $USING_EC2 = $FALSE ]; then + yum -y install subversion \ + git \ + autoconf \ + automake \ + libtool \ + gcc-c++ \ + glibc-devel \ + openssl-devel \ + tcl-devel \ + expat-devel \ + pcre \ + pcre-devel + fi + fi +} + +function cleanUp() { + if [ -e $EC2_EPHEMERAL/$PROJECT ]; then + rm -R $EC2_EPHEMERAL/$PROJECT + fi + + if [ ! -d $EC2_EPHEMERAL ]; then + mkdir -p $EC2_EPHEMERAL + cd $EC2_EPHEMERAL + fi +} + +function svnCheckout() { + #----------------SVN Only------------------------------ + svn checkout $SVN_HOME/$PROJECT/$BRANCH $FULL_BUILD_PATH + cd $FULL_BUILD_PATH + #------------------------------------------------------ +} + +#This is just for some dev/testing, and still in the 'works' +function dev() { + #----------------Git Only------------------------------ + git clone git://git.apache.org/trafficserver.git + cd $EC2_EPHEMERAL/$trafficserver + + #switch to dev build + git checkout -b remotes/origin/dev + #------------------------------------------------------ +} + +function handleGroups() { + + # maybe someday some extra security can be put in around here + # and yeah, force add the group + if [ "$DISTRIB_ID" = "$UBUNTU" ]; then + addgroup nobody + elif [ "$DISTRIB_ID" = "$FEDORA" ]; then + groupadd nobody + fi +} + +function getConfigureOptions() { + configureOptions="$BUILD_OPTIONS" + + if [ $DEBUG = $TRUE ]; then + configureOptions="$configureOptions $BUILD_OPTIONS_DEBUG" + fi + + if [ $USING_EC2 = $TRUE ]; then + if [ "$DISTRIB_ID" = "$FEDORA" ]; then + configureOptions="$configureOptions $BUILD_OPTIONS_FC8_EC2" + fi + fi + + CONFIGURE_OPTIONS=$configureOptions +} + +function rebuild() { + # remake, clean, uninstall first + + if [ ! 
-d $FULL_BUILD_PATH ]; then + echo "Can't find $FULL_BUILD_PATH, cannot continue!"; + exit 1; + fi + + handleGroups + + cd $FULL_BUILD_PATH + autoreconf -i --force + ./configure $CONFIGURE_OPTIONS + + make clean + + # Here is where things are dumb. We don't check for + # successful builds yet. Thats in the next release. + # This is why I call it dumb. + make + make uninstall + make install +} + +function postMods() { + # Flag verbose on, we like verbose + if [ -e /etc/default/rcS ]; then + sed -i 's/VERBOSE=no/VERBOSE=yes/g' /etc/default/rcS + fi + + # Link the script for init purposes, makes things nice + if [ -e $BUILD_HOME/bin/trafficserver ]; then + ln -s -f $BUILD_HOME/bin/trafficserver /etc/init.d/trafficserver + fi +} + +function freshBuild() { + clear + echo "Starting TrafficServer Install (dumb) process..." + + killAll + updateInstall + cleanUp + svnCheckout + rebuild + postMods + + echo; + echo "TrafficServer Install (dumb) process complete." +} + +function forceBuild() { + clear + echo "Starting Build Only..." + + killAll + updateInstall + rebuild + postMods + + echo "Build complete." +} + +function flipDebug() { + if [ $DEBUG = $TRUE ]; then + DEBUG=$FALSE + elif [ $DEBUG = $FALSE ]; then + DEBUG=$TRUE + fi + + getConfigureOptions; +} + +function flipEC2() { + if [ $USING_EC2 = $TRUE ]; then + USING_EC2=$FALSE + elif [ $USING_EC2 = $FALSE ]; then + USING_EC2=$TRUE + fi + + getConfigureOptions; +} + +# Crude but it works without complex regex, and some people remove ec2/ami tools for security... 
+function detectEC2() { + if [ -e /etc/ec2_version ]; then #UBUNTU + USING_EC2=$TRUE + elif [ -e /etc/ec2/release-notes ]; then #FEDORA + USING_EC2=$TRUE + fi +} + +function askUser() { +usage; +echo; + +usageLine; + +read -p "" RESPONSE + +if [ "$RESPONSE" = "freshBuild" ]; then + $RESPONSE; + exit 0; +elif [ "$RESPONSE" = "forceBuild" ]; then + $RESPONSE; + exit 0; +elif [ "$RESPONSE" = "flipDebug" ]; then + $RESPONSE; + askUser; +elif [ "$RESPONSE" = "flipEC2" ]; then + $RESPONSE; + askUser; +elif [ "$RESPONSE" = "EXIT" ]; then + echo "Exiting NOW!" + exit 0; +else + #usageLine; + askUser; + return 1; +fi + +} + +function usage() { + clear + echo; + echo 'This script is used for doing quick builds & Tests for TrafficServer.'; + echo; + displayInfo; + echo; + echo "Commands:"; + echo 'freshBuild: Checkout from svn, build and install.'; + echo 'forceBuild: Do a build from previous checked out source.'; + echo 'flipDebug: Flip the current debug mode.'; + echo 'flipEC2: Flip the current EC2 mode.'; + echo 'EXIT: Exit now!'; + echo; +} + +function displayInfo() { + #Would like to make these editable in the next release. + + echo "-----------------------------------------------------------------------" + echo " Current Options " + echo "-----------------------------------------------------------------------" + echo " OS: $DISTRIB_ID" + + if [ $DEBUG = $TRUE ]; then + echo " Debug Mode: ON" + else + echo " Debug Mode: OFF" + fi + + if [ $USING_EC2 = $TRUE ]; then + echo " EC2 Mode: ON" + else + echo " EC2 Mode: OFF" + fi + + echo " Source checkout: $EC2_EPHEMERAL/$PROJECT" + echo " Branch: $BRANCH" + echo " SVN Server: $SVN_HOME" + echo " Configure Options: $CONFIGURE_OPTIONS" + echo " Full Build Path: $FULL_BUILD_PATH" + echo "-----------------------------------------------------------------------" +} + +function usageLine() { + echo; + echo "You can access the menu by calling this script or by command line." 
+ echo "Menu usage and choices are: {freshBuild|forceBuild|flipDebug|flipEC2|EXIT}." + echo "Command line usage choices are: {freshBuild|forceBuild|freshDebugBuild|EXIT}." + echo; + echo "Notes:" + echo " - the command line has a strict debug build option." + echo " - when using the command line build, EC2 detection is automatic." + echo; +} + +#------------Main------------ + +if [ $UID != 0 ] ; then + echo "Must have root permissions to execute." + exit 1 +fi + +if [ -e /etc/SuSE-release ]; then + DISTRIB_ID=$SUSE +elif [ -e /etc/fedora-release ]; then + DISTRIB_ID=$FEDORA +elif [ -e /etc/redhat-release ]; then + DISTRIB_ID=$REDHAT # also exists on Fedora +elif [ -e /etc/lsb-release ]; then + DISTRIB_ID=$UBUNTU +elif [ -e /etc/debian-version ]; then + DISTRIB_ID=$DEBIAN # also exists on Ubuntu +elif [ -e /etc/slackware-version ]; then + DISTRIB_ID=$SLACKWARE +fi + +detectEC2; +getConfigureOptions; +displayInfo; + +case "$1" in + freshBuild) + $1; + ;; + forceBuild) + $1; + ;; + freshDebugBuild) + $DEBUG=$TRUE + freshBuild; + ;; + EXIT) + echo 'Exiting...'; + exit 0; + ;; + *) + askUser; + ;; +esac + +exit 0; diff --git a/contrib/make-ssl-multicert b/contrib/make-ssl-multicert new file mode 100755 index 00000000..901c57b9 --- /dev/null +++ b/contrib/make-ssl-multicert @@ -0,0 +1,47 @@ +#! /usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and + +# make-ssl-multicert: generate arbitrary numbers of SSL certificates +# Generate up to $COUNT SSL certificates and the corresponding ssl_multicert.config + +OPENSSL=${OPENSSL:-openssl} +COUNT=${COUNT:-100001} +NPROCS=${NPROC:-$(getconf _NPROCESSORS_ONLN)} + +makecert() { + name=$1 + + $OPENSSL genrsa -out ${name}.key 2048 + $OPENSSL req -new -key ${name}.key -out ${name}.csr \ + -subj /C=US/ST=CA/L=Norm/O=TrafficServer/OU=Test/CN=${name}.com + $OPENSSL x509 -req -days 365 \ + -in ${name}.csr -signkey ${name}.key -out ${name}.crt + cat ${name}.crt ${name}.key > ${name}.pem + rm -rf ${name}.csr ${name}.key ${name}.crt +} + +for (( i = 0 ; i < $COUNT ; i += $NPROCS )); do + + for ((j = i; j < (i + $NPROCS) && j < $COUNT; j++ )) ; do + makecert "test${j}" & + done + + wait + + for ((j = i; j < (i + $NPROCS) && j < $COUNT; j++ )) ; do + echo ssl_cert_name=test$j.pem + done >> ssl_multicert.config +done diff --git a/contrib/manifests/debian.pp b/contrib/manifests/debian.pp new file mode 100644 index 00000000..dcf94520 --- /dev/null +++ b/contrib/manifests/debian.pp @@ -0,0 +1,43 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Base ATS build dependencies. +package {[ + 'gcc', 'g++', 'automake', 'autoconf', 'libtool', 'pkg-config', + 'libssl-dev', 'tcl-dev', 'libexpat1-dev', 'libpcre3-dev', 'libhwloc-dev', + 'libcurl3-dev', 'libncurses5-dev', 'libaio-dev', + 'libcap-dev', 'libcap2', 'bison', 'flex', 'make', + 'libmodule-install-perl', + ]: + ensure => latest +} + +# Development extras. +package {[ + 'gdb', 'valgrind', 'git', 'ack-grep', 'curl', 'tmux', 'screen', + 'ccache', 'python-sphinx', + ]: + ensure => latest +} + +# if there is clang-3.4 available, install it: +if $::lsbdistcodename == 'saucy' { + package {[ + 'clang-3.4', 'clang-format-3.4' + ]: + ensure => latest, + } +} diff --git a/contrib/manifests/omnios.sh b/contrib/manifests/omnios.sh new file mode 100644 index 00000000..75b5a4fa --- /dev/null +++ b/contrib/manifests/omnios.sh @@ -0,0 +1,38 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +pkg set-publisher -g http://pkg.omniti.com/omniti-ms/ ms.omniti.com + +pkg refresh + +# Base ATS build dependencies. +pkg install \ + developer/gcc46 \ + developer/object-file \ + developer/linker \ + developer/library/lint \ + system/header \ + system/library/math/header-math \ + developer/lexer/flex \ + developer/parser/bison \ + developer/build/libtool \ + developer/versioning/git \ + omniti/runtime/tcl-8 \ + developer/build/automake-111 \ + developer/build/autoconf \ + developer/build/gnu-make || true + +pkg update || true diff --git a/contrib/manifests/redhat.pp b/contrib/manifests/redhat.pp new file mode 100644 index 00000000..b80374b5 --- /dev/null +++ b/contrib/manifests/redhat.pp @@ -0,0 +1,61 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# These packages really need to be installed before anything happens. +package {['nfs-utils', 'redhat-lsb']: + ensure =>latest +} + +$source = $lsbmajdistrelease ? 
{ + '5' => 'http://mirror.pnl.gov/epel/5/i386/epel-release-5-4.noarch.rpm', + '6' => 'http://mirror.pnl.gov/epel/6/i386/epel-release-6-8.noarch.rpm', + default => 'http://mirror.pnl.gov/epel/7/i386/epel-release-7-2.noarch.rpm', +} + +package {'epel-release': + source => $source, + provider => rpm, + ensure => present, +} + +# Base ATS build dependencies. +package {[ + 'gcc', 'gcc-c++', 'automake', 'autoconf', 'libtool', 'pkgconfig', + 'openssl-devel', 'tcl-devel', 'expat-devel', 'pcre-devel', + 'ncurses-devel', 'libaio-devel', + 'hwloc-devel', 'libcap-devel', 'bison', 'flex', 'make', + ]: + ensure => latest +} + +package { $lsbmajdistrelease ? { '5' => 'curl-devel', default => 'libcurl-devel'}: + ensure => latest +} + +# RHEL/CentOS 5 doesn't have MakeMaker and I don't know what package to use instead :( +if $lsbmajdistrelease != '5' { + package{'perl-ExtUtils-MakeMaker': + ensure => latest + } +} + +# development extras. +package {[ + 'gdb', 'valgrind', 'git', 'curl', 'screen', 'ccache' + ]: + ensure => latest, + require => Package['epel-release'] +} diff --git a/contrib/python/compare_RecordsConfigcc.py b/contrib/python/compare_RecordsConfigcc.py new file mode 100644 index 00000000..80c74123 --- /dev/null +++ b/contrib/python/compare_RecordsConfigcc.py @@ -0,0 +1,119 @@ +#!/usr/bin/env python +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import re +import sys +import string + +try: + src_dir = sys.argv[1] +except IndexError: + print "Usage: %s [trafficserver_source_dir]" % sys.argv[0] + print "Compares values in RecordsConfig.cc with the default records.config file" + sys.exit(1) + +# We expect these keys to differ between files, so ignore them +ignore_keys = { + "proxy.config.ssl.server.cert.path": 1, + "proxy.config.admin.user_id": 1, + "proxy.config.ssl.client.cert.path": 1, + "proxy.config.alarm_email": 1, + "proxy.config.log.logfile_dir": 1, + "proxy.config.ssl.CA.cert.path": 1, + "proxy.config.ssl.server.private_key.path": 1, + "proxy.config.ssl.client.CA.cert.path": 1, + "proxy.config.ssl.server.private_key.path": 1, + "proxy.config.ssl.client.CA.cert.path": 1, + "proxy.config.config_dir": 1, + "proxy.config.proxy_name": 1, + "proxy.config.cluster.ethernet_interface": 1, + "proxy.config.ssl.client.private_key.path": 1, + "proxy.config.net.defer_accept": 1 # Specified in RecordsConfig.cc funny +} + +rc_cc = {} # RecordsConfig.cc values +rc_in = {} # records.config.in values +rc_doc = {} # documented values + +# Process RecordsConfig.cc +with open("%s/mgmt/RecordsConfig.cc" % src_dir) as fh: + cc_re = re.compile(r'\{RECT_(?:CONFIG|LOCAL), "([^"]+)", RECD_([A-Z]+), (.+?), ') + for line in fh: + m = cc_re.search(line) + if m: + value = m.group(3) + value = string.lstrip(value, '"') + value = string.rstrip(value, '"') + rc_cc[m.group(1)] = (m.group(2), value) + +# Process records.config.default.in +with open("%s/proxy/config/records.config.default.in" % src_dir) as fh: + in_re = 
re.compile(r'(?:CONFIG|LOCAL) (\S+)\s+(\S+)\s+(\S+)') + for line in fh: + m = in_re.match(line) + if m: + rc_in[m.group(1)] = (m.group(2), m.group(3)) + +# Process records.config documentation. +# eg. .. ts:cv:: CONFIG proxy.config.proxy_binary STRING traffic_server +with open("%s/doc/reference/configuration/records.config.en.rst" % src_dir) as fh: + doc_re = re.compile(r'ts:cv:: CONFIG (\S+)\s+(\S+)\s+(\S+)') + for line in fh: + m = doc_re.search(line) + if m: + rc_doc[m.group(1)] = (m.group(2), m.group(3)) + +# Compare the two +# If a value is in RecordsConfig.cc and not records.config.default.in, it is +# ignored right now. +print "# Comparing RecordsConfig.cc -> records.config.default.in" +for key in rc_in: + if key in ignore_keys: + continue + if key not in rc_cc: + print "%s missing -> %s" % (key, "%s %s" % rc_in[key]) + continue + if rc_cc[key] != rc_in[key]: + print "%s : %s -> %s" % (key, "%s %s" % rc_cc[key], "%s %s" % rc_in[key]) + +# Search for undocumented variables ... +missing = [ k for k in rc_cc if k not in rc_doc ] +if len(missing) > 0: + print + print "Undocumented configuration variables:" + for m in sorted(missing): + print "\t%s %s" % (m, "%s %s" % rc_cc[m]) + +# Search for incorrectly documented default values ... +defaults = [ k for k in rc_cc if k in rc_doc and rc_cc[k] != rc_doc[k] ] +if len(defaults) > 0: + print + print "Incorrectly documented defaults:" + for d in sorted(defaults): + print "\t%s %s -> %s" % (d, "%s %s" % rc_cc[d], "%s %s" % rc_doc[d]) + + +# Search for stale documentation ... 
+stale = [ k for k in rc_doc if k not in rc_cc ] +if (len(stale) > 0): + print + print "Stale documentation:" + for s in sorted(stale): + print "\t%s" %(s) + diff --git a/contrib/python/compare_records_config.py b/contrib/python/compare_records_config.py new file mode 100644 index 00000000..a8283a38 --- /dev/null +++ b/contrib/python/compare_records_config.py @@ -0,0 +1,67 @@ +#!/usr/bin/env python +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Compares two records.config files, printing which items are different and +# what the before/after values are. +# Ignores FLOAT differences and @foo@ values from the source code defaults. 
+import sys + +def parse_records_file(filename): + fh = open(filename) + settings = {} + for line in fh: + line = line.strip() + if line.startswith('CONFIG') or line.startswith('LOCAL'): + parts = line.split() + if parts[2] == 'FLOAT': + parts[3] = parts[3].rstrip('0') + if parts[2] == 'INT' and parts[3][-1] in 'KMG': + unit = parts[3][-1] + val = parts[3][:-1] + if unit == 'K': + val = int(val) * 1024 + if unit == 'M': + val = int(val) * 1048576 + if unit == 'G': + val = int(val) * 1073741824 + parts[3] = str(val) + try: + settings[parts[1]] = parts[3] + except IndexError: + sys.stderr.write("Skipping malformed line: %s\n" % line) + continue + return settings + +def compare_settings(old, new): + for key in sorted(tuple(set(old) | set(new))): + if key not in old: + old[key] = "missing" + if key not in new: + new[key] = "missing" + + for key in sorted(old): + if old[key].startswith('@') and old[key].endswith('@'): + # Skip predefined values + continue + if old[key] != new[key]: + print "%s %s -> %s" % (key, old[key], new[key]) + +if __name__ == '__main__': + settings_orig = parse_records_file(sys.argv[1]) + settings_new = parse_records_file(sys.argv[2]) + compare_settings(settings_orig, settings_new) diff --git a/contrib/set_trafficserver.sh b/contrib/set_trafficserver.sh new file mode 100644 index 00000000..3182858d --- /dev/null +++ b/contrib/set_trafficserver.sh @@ -0,0 +1,100 @@ +#!/bin/bash +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Basic Dev base settings script. +# Desc: This script is meant for EC2 'like' Instances, however it works +# perfectly for non-ec2 installs. This script is intended as a guide and +# makes use of many of the default settings for Apache Traffic Server. +# There are a few comments which offer help for performance, please +# read them. +# Author: Jason Giedymin +# Version Information: +# v1.0.0 - Initial Release +# v1.0.1 - thread limit set to 1 +# v1.0.2 - VM specific settings, getting ready for auto benchmarking. +# v1.0.3 - Check to force /mnt based cache.db + +REMAP_FILE=/usr/local/etc/trafficserver/remap.config +STORAGE_FILE=/usr/local/etc/trafficserver/storage.config +EC2_CACHE_LOC=/mnt/trafficserver_cache + +# Base settings to use for testing and benchmarking +function recordsConfig() { + startServer + + traffic_line -s proxy.config.reverse_proxy.enabled -v 1 + traffic_line -s proxy.config.exec_thread.autoconfig -v 1 + + # Good default on a dedicated box or SMP VM. + #traffic_line -s proxy.config.exec_thread.autoconfig.scale -v 3.000000 + + # Good for a VM. + traffic_line -s proxy.config.exec_thread.autoconfig.scale -v 1.000000 + + traffic_line -s proxy.config.accept_threads -v 1 + traffic_line -s proxy.config.log.logging_enabled -v 0 + traffic_line -s proxy.config.http.server_port -v 8080 + traffic_line -s proxy.config.url_remap.pristine_host_hdr -v 1 + + # Good for a VM. + traffic_line -s proxy.config.exec_thread.limit -v 1 + + # Good default on a dedicated box or SMP VM. 
+ traffic_line -s proxy.config.exec_thread.limit -v 2 +} + +function sampleRemap() { + echo "Modifying $REMAP_FILE ..." + + #This is purely for testing. Please supply your own ports and hosts. + echo "map http://localhost:8080 http://localhost:80" >> $REMAP_FILE + echo "map https://localhost:8443 http://localhost:443" >> $REMAP_FILE +} + +function ec2Cache() { + echo "Modifying $STORAGE_FILE ..." + + if [ ! -d $EC2_CACHE_LOC ]; then + echo "Creating $EC2_CACHE_LOC and Chown-ing $STORAGE_FILE" + mkdir -p $EC2_CACHE_LOC + chown nobody:nobody $STORAGE_FILE + fi + + sed -i 's/.\/var\/trafficserver 150994944/\/mnt\/trafficserver_cache 1073741824/g' $STORAGE_FILE +} + +function startServer() { + # If installed with the install script, use the init.d file created + if [ -x /etc/init.d/trafficserver ]; then + echo "Starting by forcing a restart of Apache TrafficServer..." + /etc/init.d/trafficserver restart + sleep 3 + fi +} + +function start() { + echo "Modifying configs..." + + sampleRemap + ec2Cache + + recordsConfig + + echo "Complete." +} + +start diff --git a/doc/Doxyfile.in b/doc/Doxyfile.in new file mode 100644 index 00000000..28f66458 --- /dev/null +++ b/doc/Doxyfile.in @@ -0,0 +1,1283 @@ +# Doxyfile 1.5.2 +# @configure_input@ + +# This file describes the settings to be used by the documentation system +# doxygen (www.doxygen.org) for a project +# +# All text after a hash (#) is considered a comment and will be ignored +# The format is: +# TAG = value [value, ...] +# For lists items can also be appended using: +# TAG += value [value, ...] +# Values that contain spaces should be placed between quotes (" ") + +#--------------------------------------------------------------------------- +# Project related configuration options +#--------------------------------------------------------------------------- + +# This tag specifies the encoding used for all characters in the config file that +# follow. 
The default is UTF-8 which is also the encoding used for all text before +# the first occurrence of this tag. Doxygen uses libiconv (or the iconv built into +# libc) for the transcoding. See http://www.gnu.org/software/libiconv for the list of +# possible encodings. + +DOXYFILE_ENCODING = UTF-8 + +# The PROJECT_NAME tag is a single word (or a sequence of words surrounded +# by quotes) that should identify the project. + +PROJECT_NAME = @PACKAGE_NAME@ + +# The PROJECT_NUMBER tag can be used to enter a project or revision number. +# This could be handy for archiving the generated documentation or +# if some version control system is used. + +PROJECT_NUMBER = @PACKAGE_VERSION@ + +# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) +# base path where the generated documentation will be put. +# If a relative path is entered, it will be relative to the location +# where doxygen was started. If left blank the current directory will be used. + +OUTPUT_DIRECTORY = @abs_top_builddir@/doc + +# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create +# 4096 sub-directories (in 2 levels) under the output directory of each output +# format and will distribute the generated files over these directories. +# Enabling this option can be useful when feeding doxygen a huge amount of +# source files, where putting all generated files in the same directory would +# otherwise cause performance problems for the file system. + +CREATE_SUBDIRS = NO + +# The OUTPUT_LANGUAGE tag is used to specify the language in which all +# documentation generated by doxygen is written. Doxygen will use this +# information to generate all constant output in the proper language. 
+# The default language is English, other supported languages are: +# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional, +# Croatian, Czech, Danish, Dutch, Finnish, French, German, Greek, Hungarian, +# Italian, Japanese, Japanese-en (Japanese with English messages), Korean, +# Korean-en, Lithuanian, Norwegian, Polish, Portuguese, Romanian, Russian, +# Serbian, Slovak, Slovene, Spanish, Swedish, and Ukrainian. + +OUTPUT_LANGUAGE = English + +# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will +# include brief member descriptions after the members that are listed in +# the file and class documentation (similar to JavaDoc). +# Set to NO to disable this. + +BRIEF_MEMBER_DESC = YES + +# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend +# the brief description of a member or function before the detailed description. +# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the +# brief descriptions will be completely suppressed. + +REPEAT_BRIEF = YES + +# This tag implements a quasi-intelligent brief description abbreviator +# that is used to form the text in various listings. Each string +# in this list, if found as the leading text of the brief description, will be +# stripped from the text and the result after processing the whole list, is +# used as the annotated text. Otherwise, the brief description is used as-is. +# If left blank, the following values are used ("$name" is automatically +# replaced with the name of the entity): "The $name class" "The $name widget" +# "The $name file" "is" "provides" "specifies" "contains" +# "represents" "a" "an" "the" + +ABBREVIATE_BRIEF = "The $name class" \ + "The $name widget" \ + "The $name file" \ + is \ + provides \ + specifies \ + contains \ + represents \ + a \ + an \ + the + +# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then +# Doxygen will generate a detailed section even if there is only a brief +# description. 
+ +ALWAYS_DETAILED_SEC = NO + +# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all +# inherited members of a class in the documentation of that class as if those +# members were ordinary class members. Constructors, destructors and assignment +# operators of the base classes will not be shown. + +INLINE_INHERITED_MEMB = NO + +# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full +# path before files name in the file list and in the header files. If set +# to NO the shortest path that makes the file name unique will be used. + +FULL_PATH_NAMES = NO + +# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag +# can be used to strip a user-defined part of the path. Stripping is +# only done if one of the specified strings matches the left-hand part of +# the path. The tag can be used to show relative paths in the file list. +# If left blank the directory from which doxygen is run is used as the +# path to strip. + +STRIP_FROM_PATH = + +# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of +# the path mentioned in the documentation of a class, which tells +# the reader which header file to include in order to use a class. +# If left blank only the name of the header file containing the class +# definition is used. Otherwise one should specify the include paths that +# are normally passed to the compiler using the -I flag. + +STRIP_FROM_INC_PATH = + +# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter +# (but less readable) file names. This can be useful is your file systems +# doesn't support long names like on DOS, Mac, or CD-ROM. + +SHORT_NAMES = NO + +# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen +# will interpret the first line (until the first dot) of a JavaDoc-style +# comment as the brief description. 
If set to NO, the JavaDoc +# comments will behave just like the Qt-style comments (thus requiring an +# explicit @brief command for a brief description. + +JAVADOC_AUTOBRIEF = YES + +# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen +# treat a multi-line C++ special comment block (i.e. a block of //! or /// +# comments) as a brief description. This used to be the default behaviour. +# The new default is to treat a multi-line C++ comment block as a detailed +# description. Set this tag to YES if you prefer the old behaviour instead. + +MULTILINE_CPP_IS_BRIEF = NO + +# If the DETAILS_AT_TOP tag is set to YES then Doxygen +# will output the detailed description near the top, like JavaDoc. +# If set to NO, the detailed description appears after the member +# documentation. + +DETAILS_AT_TOP = NO + +# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented +# member inherits the documentation from any documented member that it +# re-implements. + +INHERIT_DOCS = YES + +# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce +# a new page for each member. If set to NO, the documentation of a member will +# be part of the file/class/namespace that contains it. + +SEPARATE_MEMBER_PAGES = NO + +# The TAB_SIZE tag can be used to set the number of spaces in a tab. +# Doxygen uses this value to replace tabs by spaces in code fragments. + +TAB_SIZE = 8 + +# This tag can be used to specify a number of aliases that acts +# as commands in the documentation. An alias has the form "name=value". +# For example adding "sideeffect=\par Side Effects:\n" will allow you to +# put the command \sideeffect (or @sideeffect) in the documentation, which +# will result in a user-defined paragraph with heading "Side Effects:". +# You can put \n's in the value part of an alias to insert newlines. + +ALIASES = + +# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C +# sources only. 
Doxygen will then generate output that is more tailored for C. +# For instance, some of the names that are used will be different. The list +# of all members will be omitted, etc. + +OPTIMIZE_OUTPUT_FOR_C = YES + +# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java +# sources only. Doxygen will then generate output that is more tailored for Java. +# For instance, namespaces will be presented as packages, qualified scopes +# will look different, etc. + +OPTIMIZE_OUTPUT_JAVA = NO + +# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want to +# include (a tag file for) the STL sources as input, then you should +# set this tag to YES in order to let doxygen match functions declarations and +# definitions whose arguments contain STL classes (e.g. func(std::string); v.s. +# func(std::string) {}). This also make the inheritance and collaboration +# diagrams that involve STL classes more complete and accurate. + +BUILTIN_STL_SUPPORT = NO + +# If you use Microsoft's C++/CLI language, you should set this option to YES to +# enable parsing support. + +CPP_CLI_SUPPORT = NO + +# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC +# tag is set to YES, then doxygen will reuse the documentation of the first +# member in the group (if any) for the other members of the group. By default +# all members of a group must be documented explicitly. + +DISTRIBUTE_GROUP_DOC = NO + +# Set the SUBGROUPING tag to YES (the default) to allow class member groups of +# the same type (for instance a group of public functions) to be put as a +# subgroup of that type (e.g. under the Public Functions section). Set it to +# NO to prevent subgrouping. Alternatively, this can be done per class using +# the \nosubgrouping command. 
+ +SUBGROUPING = YES + +#--------------------------------------------------------------------------- +# Build related configuration options +#--------------------------------------------------------------------------- + +# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in +# documentation are documented, even if no documentation was available. +# Private class members and static file members will be hidden unless +# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES + +EXTRACT_ALL = YES + +# If the EXTRACT_PRIVATE tag is set to YES all private members of a class +# will be included in the documentation. + +EXTRACT_PRIVATE = NO + +# If the EXTRACT_STATIC tag is set to YES all static members of a file +# will be included in the documentation. + +EXTRACT_STATIC = YES + +# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) +# defined locally in source files will be included in the documentation. +# If set to NO only classes defined in header files are included. + +EXTRACT_LOCAL_CLASSES = YES + +# This flag is only useful for Objective-C code. When set to YES local +# methods, which are defined in the implementation section but not in +# the interface are included in the documentation. +# If set to NO (the default) only methods in the interface are included. + +EXTRACT_LOCAL_METHODS = NO + +# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all +# undocumented members of documented classes, files or namespaces. +# If set to NO (the default) these members will be included in the +# various overviews, but no documentation section is generated. +# This option has no effect if EXTRACT_ALL is enabled. + +HIDE_UNDOC_MEMBERS = NO + +# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all +# undocumented classes that are normally visible in the class hierarchy. +# If set to NO (the default) these classes will be included in the various +# overviews. This option has no effect if EXTRACT_ALL is enabled. 
+ +HIDE_UNDOC_CLASSES = NO + +# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all +# friend (class|struct|union) declarations. +# If set to NO (the default) these declarations will be included in the +# documentation. + +HIDE_FRIEND_COMPOUNDS = NO + +# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any +# documentation blocks found inside the body of a function. +# If set to NO (the default) these blocks will be appended to the +# function's detailed documentation block. + +HIDE_IN_BODY_DOCS = NO + +# The INTERNAL_DOCS tag determines if documentation +# that is typed after a \internal command is included. If the tag is set +# to NO (the default) then the documentation will be excluded. +# Set it to YES to include the internal documentation. + +INTERNAL_DOCS = NO + +# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate +# file names in lower-case letters. If set to YES upper-case letters are also +# allowed. This is useful if you have classes or files whose names only differ +# in case and if your file system supports case sensitive file names. Windows +# and Mac users are advised to set this option to NO. + +CASE_SENSE_NAMES = YES + +# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen +# will show members with their full class and namespace scopes in the +# documentation. If set to YES the scope will be hidden. + +HIDE_SCOPE_NAMES = NO + +# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen +# will put a list of the files that are included by a file in the documentation +# of that file. + +SHOW_INCLUDE_FILES = YES + +# If the INLINE_INFO tag is set to YES (the default) then a tag [inline] +# is inserted in the documentation for inline members. + +INLINE_INFO = YES + +# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen +# will sort the (detailed) documentation of file and class members +# alphabetically by member name. 
If set to NO the members will appear in +# declaration order. + +SORT_MEMBER_DOCS = YES + +# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the +# brief documentation of file, namespace and class members alphabetically +# by member name. If set to NO (the default) the members will appear in +# declaration order. + +SORT_BRIEF_DOCS = NO + +# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be +# sorted by fully-qualified names, including namespaces. If set to +# NO (the default), the class list will be sorted only by class name, +# not including the namespace part. +# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. +# Note: This option applies only to the class list, not to the +# alphabetical list. + +SORT_BY_SCOPE_NAME = NO + +# The GENERATE_TODOLIST tag can be used to enable (YES) or +# disable (NO) the todo list. This list is created by putting \todo +# commands in the documentation. + +GENERATE_TODOLIST = YES + +# The GENERATE_TESTLIST tag can be used to enable (YES) or +# disable (NO) the test list. This list is created by putting \test +# commands in the documentation. + +GENERATE_TESTLIST = YES + +# The GENERATE_BUGLIST tag can be used to enable (YES) or +# disable (NO) the bug list. This list is created by putting \bug +# commands in the documentation. + +GENERATE_BUGLIST = YES + +# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or +# disable (NO) the deprecated list. This list is created by putting +# \deprecated commands in the documentation. + +GENERATE_DEPRECATEDLIST= YES + +# The ENABLED_SECTIONS tag can be used to enable conditional +# documentation sections, marked by \if sectionname ... \endif. + +ENABLED_SECTIONS = + +# The MAX_INITIALIZER_LINES tag determines the maximum number of lines +# the initial value of a variable or define consists of for it to appear in +# the documentation. If the initializer consists of more lines than specified +# here it will be hidden. 
Use a value of 0 to hide initializers completely.
+# The appearance of the initializer of individual variables and defines in the
+# documentation can be controlled using \showinitializer or \hideinitializer
+# command in the documentation regardless of this setting.
+
+MAX_INITIALIZER_LINES = 30
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated
+# at the bottom of the documentation of classes and structs. If set to YES the
+# list will mention the files that were used to generate the documentation.
+
+SHOW_USED_FILES = YES
+
+# If the sources in your project are distributed over multiple directories
+# then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy
+# in the documentation. The default is NO.
+
+SHOW_DIRECTORIES = NO
+
+# The FILE_VERSION_FILTER tag can be used to specify a program or script that
+# doxygen should invoke to get the current version for each file (typically from the
+# version control system). Doxygen will invoke the program by executing (via
+# popen()) the command <command> <input-file>, where <command> is the value of
+# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file
+# provided by doxygen. Whatever the program writes to standard output
+# is used as the file version. See the manual for examples.
+
+FILE_VERSION_FILTER =
+
+#---------------------------------------------------------------------------
+# configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+
+# The QUIET tag can be used to turn on/off the messages that are generated
+# by doxygen. Possible values are YES and NO. If left blank NO is used.
+
+QUIET = NO
+
+# The WARNINGS tag can be used to turn on/off the warning messages that are
+# generated by doxygen. Possible values are YES and NO. If left blank
+# NO is used.
+
+WARNINGS = YES
+
+# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings
+# for undocumented members. 
If EXTRACT_ALL is set to YES then this flag will
+# automatically be disabled.
+
+WARN_IF_UNDOCUMENTED = YES
+
+# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for
+# potential errors in the documentation, such as not documenting some
+# parameters in a documented function, or documenting parameters that
+# don't exist or using markup commands wrongly.
+
+WARN_IF_DOC_ERROR = YES
+
+# This WARN_NO_PARAMDOC option can be enabled to get warnings for
+# functions that are documented, but have no documentation for their parameters
+# or return value. If set to NO (the default) doxygen will only warn about
+# wrong or incomplete parameter documentation, but not about the absence of
+# documentation.
+
+WARN_NO_PARAMDOC = NO
+
+# The WARN_FORMAT tag determines the format of the warning messages that
+# doxygen can produce. The string should contain the $file, $line, and $text
+# tags, which will be replaced by the file and line number from which the
+# warning originated and the warning text. Optionally the format may contain
+# $version, which will be replaced by the version of the file (if it could
+# be obtained via FILE_VERSION_FILTER)
+
+WARN_FORMAT = "$file:$line: $text"
+
+# The WARN_LOGFILE tag can be used to specify a file to which warning
+# and error messages should be written. If left blank the output is written
+# to stderr.
+
+WARN_LOGFILE = warn.log
+
+#---------------------------------------------------------------------------
+# configuration options related to the input files
+#---------------------------------------------------------------------------
+
+# The INPUT tag can be used to specify the files and/or directories that contain
+# documented source files. You may enter file names like "myfile.cpp" or
+# directories like "/usr/src/myproject". Separate the files or directories
+# with spaces. 
+
+INPUT = @abs_top_srcdir@/doc/mainpage.doc \
+ @abs_top_srcdir@/iocore \
+ @abs_top_srcdir@/lib/ts \
+ @abs_top_srcdir@/lib/tsconfig \
+ @abs_top_srcdir@/lib/records \
+ @abs_top_srcdir@/proxy \
+ @abs_top_srcdir@/test
+
+# This tag can be used to specify the character encoding of the source files that
+# doxygen parses. Internally doxygen uses the UTF-8 encoding, which is also the default
+# input encoding. Doxygen uses libiconv (or the iconv built into libc) for the transcoding.
+# See http://www.gnu.org/software/libiconv for the list of possible encodings.
+
+INPUT_ENCODING = UTF-8
+
+# If the value of the INPUT tag contains directories, you can use the
+# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank the following patterns are tested:
+# *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx
+# *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py
+
+FILE_PATTERNS = *.c *.cc *.h *.i
+
+# The RECURSIVE tag can be used to specify whether or not subdirectories
+# should be searched for input files as well. Possible values are YES and NO.
+# If left blank NO is used.
+
+RECURSIVE = YES
+
+# The EXCLUDE tag can be used to specify files and/or directories that should
+# be excluded from the INPUT source files. This way you can easily exclude a
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+
+EXCLUDE =
+
+# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
+# directories that are symbolic links (a Unix filesystem feature) are excluded
+# from the input.
+
+EXCLUDE_SYMLINKS = NO
+
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories. 
Note that the wildcards are matched
+# against the file with absolute path, so to exclude all test directories
+# for example use the pattern */test/*
+
+EXCLUDE_PATTERNS = test_*.cc
+EXCLUDE_PATTERNS += test_*.h
+EXCLUDE_PATTERNS += test_*.i
+
+# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
+# (namespaces, classes, functions, etc.) that should be excluded from the output.
+# The symbol name can be a fully qualified name, a word, or if the wildcard * is used,
+# a substring. Examples: ANamespace, AClass, AClass::ANamespace, ANamespace::*Test
+
+EXCLUDE_SYMBOLS =
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or
+# directories that contain example code fragments that are included (see
+# the \include command).
+
+EXAMPLE_PATH =
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the
+# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank all files are included.
+
+EXAMPLE_PATTERNS =
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
+# searched for input files to be used with the \include or \dontinclude
+# commands irrespective of the value of the RECURSIVE tag.
+# Possible values are YES and NO. If left blank NO is used.
+
+EXAMPLE_RECURSIVE = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or
+# directories that contain images that are included in the documentation (see
+# the \image command).
+
+IMAGE_PATH =
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command <filter> <input-file>, where <filter>
+# is the value of the INPUT_FILTER tag, and <input-file> is the name of an
+# input file. Doxygen will then use the output that the filter program writes
+# to standard output. If FILTER_PATTERNS is specified, this tag will be
+# ignored. 
+ +INPUT_FILTER = + +# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern +# basis. Doxygen will compare the file name with each pattern and apply the +# filter if there is a match. The filters are a list of the form: +# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further +# info on how filters are used. If FILTER_PATTERNS is empty, INPUT_FILTER +# is applied to all files. + +FILTER_PATTERNS = + +# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using +# INPUT_FILTER) will be used to filter the input files when producing source +# files to browse (i.e. when SOURCE_BROWSER is set to YES). + +FILTER_SOURCE_FILES = NO + +#--------------------------------------------------------------------------- +# configuration options related to source browsing +#--------------------------------------------------------------------------- + +# If the SOURCE_BROWSER tag is set to YES then a list of source files will +# be generated. Documented entities will be cross-referenced with these sources. +# Note: To get rid of all source code in the generated output, make sure also +# VERBATIM_HEADERS is set to NO. + +SOURCE_BROWSER = NO + +# Setting the INLINE_SOURCES tag to YES will include the body +# of functions and classes directly in the documentation. + +INLINE_SOURCES = NO + +# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct +# doxygen to hide any special comment blocks from generated source code +# fragments. Normal C and C++ comments will always remain visible. + +STRIP_CODE_COMMENTS = YES + +# If the REFERENCED_BY_RELATION tag is set to YES (the default) +# then for each documented function all documented +# functions referencing it will be listed. + +REFERENCED_BY_RELATION = YES + +# If the REFERENCES_RELATION tag is set to YES (the default) +# then for each documented function all documented entities +# called/used by that function will be listed. 
+
+REFERENCES_RELATION = YES
+
+# If the REFERENCES_LINK_SOURCE tag is set to YES (the default)
+# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from
+# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will
+# link to the source code. Otherwise they will link to the documentation.
+
+REFERENCES_LINK_SOURCE = YES
+
+# If the USE_HTAGS tag is set to YES then the references to source code
+# will point to the HTML generated by the htags(1) tool instead of doxygen
+# built-in source browser. The htags tool is part of GNU's global source
+# tagging system (see http://www.gnu.org/software/global/global.html). You
+# will need version 4.8.6 or higher.
+
+USE_HTAGS = NO
+
+# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen
+# will generate a verbatim copy of the header file for each class for
+# which an include is specified. Set to NO to disable this.
+
+VERBATIM_HEADERS = YES
+
+#---------------------------------------------------------------------------
+# configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index
+# of all compounds will be generated. Enable this if the project
+# contains a lot of classes, structs, unions or interfaces.
+
+ALPHABETICAL_INDEX = YES
+
+# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then
+# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
+# in which this list will be split (can be a number in the range [1..20])
+
+COLS_IN_ALPHA_INDEX = 5
+
+# In case all classes in a project start with a common prefix, all
+# classes will be put under the same header in the alphabetical index.
+# The IGNORE_PREFIX tag can be used to specify one or more prefixes that
+# should be ignored while generating the index headers. 
+ +IGNORE_PREFIX = + +#--------------------------------------------------------------------------- +# configuration options related to the HTML output +#--------------------------------------------------------------------------- + +# If the GENERATE_HTML tag is set to YES (the default) Doxygen will +# generate HTML output. + +GENERATE_HTML = YES + +# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `html' will be used as the default path. + +HTML_OUTPUT = api + +# The HTML_FILE_EXTENSION tag can be used to specify the file extension for +# each generated HTML page (for example: .htm,.php,.asp). If it is left blank +# doxygen will generate files with .html extension. + +HTML_FILE_EXTENSION = .html + +# The HTML_HEADER tag can be used to specify a personal HTML header for +# each generated HTML page. If it is left blank doxygen will generate a +# standard header. + +HTML_HEADER = + +# The HTML_FOOTER tag can be used to specify a personal HTML footer for +# each generated HTML page. If it is left blank doxygen will generate a +# standard footer. + +HTML_FOOTER = + +# The HTML_STYLESHEET tag can be used to specify a user-defined cascading +# style sheet that is used by each HTML page. It can be used to +# fine-tune the look of the HTML output. If the tag is left blank doxygen +# will generate a default style sheet. Note that doxygen will try to copy +# the style sheet file to the HTML output directory, so don't put your own +# stylesheet in the HTML output directory as well, or it will be erased! + +HTML_STYLESHEET = + +# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes, +# files or namespaces will be aligned in HTML using tables. If set to +# NO a bullet list will be used. 
+
+HTML_ALIGN_MEMBERS = YES
+
+# If the GENERATE_HTMLHELP tag is set to YES, additional index files
+# will be generated that can be used as input for tools like the
+# Microsoft HTML help workshop to generate a compressed HTML help file (.chm)
+# of the generated HTML documentation.
+
+GENERATE_HTMLHELP = NO
+
+# If the HTML_TIMESTAMP tag is set to YES then the footer of each
+# generated HTML page will contain the date and time when the page
+# was generated. Setting this to NO can help when comparing the
+# output of multiple runs.
+
+HTML_TIMESTAMP = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can
+# be used to specify the file name of the resulting .chm file. You
+# can add a path in front of the file if the result should not be
+# written to the html output directory.
+
+CHM_FILE =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can
+# be used to specify the location (absolute path including file name) of
+# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run
+# the HTML help compiler on the generated index.hhp.
+
+HHC_LOCATION =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag
+# controls if a separate .chi index file is generated (YES) or that
+# it should be included in the master .chm file (NO).
+
+GENERATE_CHI = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag
+# controls whether a binary table of contents is generated (YES) or a
+# normal table of contents (NO) in the .chm file.
+
+BINARY_TOC = NO
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members
+# to the contents of the HTML help documentation and to the tree view.
+
+TOC_EXPAND = NO
+
+# The DISABLE_INDEX tag can be used to turn on/off the condensed index at
+# top of each HTML page. The value NO (the default) enables the index and
+# the value YES disables it. 
+ +DISABLE_INDEX = NO + +# This tag can be used to set the number of enum values (range [1..20]) +# that doxygen will group on one line in the generated HTML documentation. + +ENUM_VALUES_PER_LINE = 4 + +# If the GENERATE_TREEVIEW tag is set to YES, a side panel will be +# generated containing a tree-like index structure (just like the one that +# is generated for HTML Help). For this to work a browser that supports +# JavaScript, DHTML, CSS and frames is required (for instance Mozilla 1.0+, +# Netscape 6.0+, Internet explorer 5.0+, or Konqueror). Windows users are +# probably better off using the HTML help feature. + +GENERATE_TREEVIEW = NO + +# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be +# used to set the initial width (in pixels) of the frame in which the tree +# is shown. + +TREEVIEW_WIDTH = 250 + +#--------------------------------------------------------------------------- +# configuration options related to the LaTeX output +#--------------------------------------------------------------------------- + +# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will +# generate Latex output. + +GENERATE_LATEX = NO + +# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `latex' will be used as the default path. + +LATEX_OUTPUT = latex + +# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be +# invoked. If left blank `latex' will be used as the default command name. + +LATEX_CMD_NAME = latex + +# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to +# generate index for LaTeX. If left blank `makeindex' will be used as the +# default command name. + +MAKEINDEX_CMD_NAME = makeindex + +# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact +# LaTeX documents. This may be useful for small projects and may help to +# save some trees in general. 
+ +COMPACT_LATEX = NO + +# The PAPER_TYPE tag can be used to set the paper type that is used +# by the printer. Possible values are: a4, a4wide, letter, legal and +# executive. If left blank a4wide will be used. + +PAPER_TYPE = letter + +# The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX +# packages that should be included in the LaTeX output. + +EXTRA_PACKAGES = + +# The LATEX_HEADER tag can be used to specify a personal LaTeX header for +# the generated latex document. The header should contain everything until +# the first chapter. If it is left blank doxygen will generate a +# standard header. Notice: only use this tag if you know what you are doing! + +LATEX_HEADER = + +# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated +# is prepared for conversion to pdf (using ps2pdf). The pdf file will +# contain links (just like the HTML output) instead of page references +# This makes the output suitable for online browsing using a pdf viewer. + +PDF_HYPERLINKS = YES + +# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of +# plain latex in the generated Makefile. Set this option to YES to get a +# higher quality PDF documentation. + +USE_PDFLATEX = YES + +# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. +# command to the generated LaTeX files. This will instruct LaTeX to keep +# running if errors occur, instead of asking the user for help. +# This option is also used when generating formulas in HTML. + +LATEX_BATCHMODE = NO + +# If LATEX_HIDE_INDICES is set to YES then doxygen will not +# include the index chapters (such as File Index, Compound Index, etc.) +# in the output. 
+ +LATEX_HIDE_INDICES = NO + +#--------------------------------------------------------------------------- +# configuration options related to the RTF output +#--------------------------------------------------------------------------- + +# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output +# The RTF output is optimized for Word 97 and may not look very pretty with +# other RTF readers or editors. + +GENERATE_RTF = NO + +# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `rtf' will be used as the default path. + +RTF_OUTPUT = rtf + +# If the COMPACT_RTF tag is set to YES Doxygen generates more compact +# RTF documents. This may be useful for small projects and may help to +# save some trees in general. + +COMPACT_RTF = NO + +# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated +# will contain hyperlink fields. The RTF file will +# contain links (just like the HTML output) instead of page references. +# This makes the output suitable for online browsing using WORD or other +# programs which support those fields. +# Note: wordpad (write) and others do not support links. + +RTF_HYPERLINKS = NO + +# Load stylesheet definitions from file. Syntax is similar to doxygen's +# config file, i.e. a series of assignments. You only have to provide +# replacements, missing definitions are set to their default value. + +RTF_STYLESHEET_FILE = + +# Set optional variables used in the generation of an rtf document. +# Syntax is similar to doxygen's config file. 
+ +RTF_EXTENSIONS_FILE = + +#--------------------------------------------------------------------------- +# configuration options related to the man page output +#--------------------------------------------------------------------------- + +# If the GENERATE_MAN tag is set to YES (the default) Doxygen will +# generate man pages + +GENERATE_MAN = NO + +# The MAN_OUTPUT tag is used to specify where the man pages will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `man' will be used as the default path. + +MAN_OUTPUT = man + +# The MAN_EXTENSION tag determines the extension that is added to +# the generated man pages (default is the subroutine's section .3) + +MAN_EXTENSION = .3 + +# If the MAN_LINKS tag is set to YES and Doxygen generates man output, +# then it will generate one additional man file for each entity +# documented in the real man page(s). These additional files +# only source the real man page, but without them the man command +# would be unable to find the correct page. The default is NO. + +MAN_LINKS = NO + +#--------------------------------------------------------------------------- +# configuration options related to the XML output +#--------------------------------------------------------------------------- + +# If the GENERATE_XML tag is set to YES Doxygen will +# generate an XML file that captures the structure of +# the code including all documentation. + +GENERATE_XML = YES + +# The XML_OUTPUT tag is used to specify where the XML pages will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `xml' will be used as the default path. + +XML_OUTPUT = source/doxygen_xml_api + +# The XML_SCHEMA tag can be used to specify an XML schema, +# which can be used by a validating XML parser to check the +# syntax of the XML files. 
+ +XML_SCHEMA = + +# The XML_DTD tag can be used to specify an XML DTD, +# which can be used by a validating XML parser to check the +# syntax of the XML files. + +XML_DTD = + +# If the XML_PROGRAMLISTING tag is set to YES Doxygen will +# dump the program listings (including syntax highlighting +# and cross-referencing information) to the XML output. Note that +# enabling this will significantly increase the size of the XML output. + +XML_PROGRAMLISTING = YES + +#--------------------------------------------------------------------------- +# configuration options for the AutoGen Definitions output +#--------------------------------------------------------------------------- + +# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will +# generate an AutoGen Definitions (see autogen.sf.net) file +# that captures the structure of the code including all +# documentation. Note that this feature is still experimental +# and incomplete at the moment. + +GENERATE_AUTOGEN_DEF = NO + +#--------------------------------------------------------------------------- +# configuration options related to the Perl module output +#--------------------------------------------------------------------------- + +# If the GENERATE_PERLMOD tag is set to YES Doxygen will +# generate a Perl module file that captures the structure of +# the code including all documentation. Note that this +# feature is still experimental and incomplete at the +# moment. + +GENERATE_PERLMOD = NO + +# If the PERLMOD_LATEX tag is set to YES Doxygen will generate +# the necessary Makefile rules, Perl scripts and LaTeX code to be able +# to generate PDF and DVI output from the Perl module output. + +PERLMOD_LATEX = NO + +# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be +# nicely formatted so it can be parsed by a human reader. This is useful +# if you want to understand what is going on. 
On the other hand, if this +# tag is set to NO the size of the Perl module output will be much smaller +# and Perl will parse it just the same. + +PERLMOD_PRETTY = YES + +# The names of the make variables in the generated doxyrules.make file +# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. +# This is useful so different doxyrules.make files included by the same +# Makefile don't overwrite each other's variables. + +PERLMOD_MAKEVAR_PREFIX = + +#--------------------------------------------------------------------------- +# Configuration options related to the preprocessor +#--------------------------------------------------------------------------- + +# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will +# evaluate all C-preprocessor directives found in the sources and include +# files. + +ENABLE_PREPROCESSING = YES + +# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro +# names in the source code. If set to NO (the default) only conditional +# compilation will be performed. Macro expansion can be done in a controlled +# way by setting EXPAND_ONLY_PREDEF to YES. + +MACRO_EXPANSION = NO + +# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES +# then the macro expansion is limited to the macros specified with the +# PREDEFINED and EXPAND_AS_DEFINED tags. + +EXPAND_ONLY_PREDEF = NO + +# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files +# in the INCLUDE_PATH (see below) will be search if a #include is found. + +SEARCH_INCLUDES = YES + +# The INCLUDE_PATH tag can be used to specify one or more directories that +# contain include files that are not input files but should be processed by +# the preprocessor. + +INCLUDE_PATH = + +# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard +# patterns (like *.h and *.hpp) to filter out the header-files in the +# directories. If left blank, the patterns specified with FILE_PATTERNS will +# be used. 
+ +INCLUDE_FILE_PATTERNS = + +# The PREDEFINED tag can be used to specify one or more macro names that +# are defined before the preprocessor is started (similar to the -D option of +# gcc). The argument of the tag is a list of macros of the form: name +# or name=definition (no spaces). If the definition and the = are +# omitted =1 is assumed. To prevent a macro definition from being +# undefined via #undef or recursively expanded use the := operator +# instead of the = operator. + +PREDEFINED = + +# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then +# this tag can be used to specify a list of macro names that should be expanded. +# The macro definition that is found in the sources will be used. +# Use the PREDEFINED tag if you want to use a different macro definition. + +EXPAND_AS_DEFINED = + +# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then +# doxygen's preprocessor will remove all function-like macros that are alone +# on a line, have an all uppercase name, and do not end with a semicolon. Such +# function macros are typically used for boiler-plate code, and will confuse +# the parser if not removed. + +SKIP_FUNCTION_MACROS = YES + +#--------------------------------------------------------------------------- +# Configuration::additions related to external references +#--------------------------------------------------------------------------- + +# The TAGFILES option can be used to specify one or more tagfiles. +# Optionally an initial location of the external documentation +# can be added for each tagfile. The format of a tag file without +# this location is as follows: +# TAGFILES = file1 file2 ... +# Adding location for the tag files is done as follows: +# TAGFILES = file1=loc1 "file2 = loc2" ... +# where "loc1" and "loc2" can be relative or absolute paths or +# URLs. If a location is present for each tag, the installdox tool +# does not have to be run to correct the links. 
+# Note that each tag file must have a unique name +# (where the name does NOT include the path) +# If a tag file is not located in the directory in which doxygen +# is run, you must also specify the path to the tagfile here. + +TAGFILES = + +# When a file name is specified after GENERATE_TAGFILE, doxygen will create +# a tag file that is based on the input files it reads. + +GENERATE_TAGFILE = + +# If the ALLEXTERNALS tag is set to YES all external classes will be listed +# in the class index. If set to NO only the inherited external classes +# will be listed. + +ALLEXTERNALS = NO + +# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed +# in the modules index. If set to NO, only the current project's groups will +# be listed. + +EXTERNAL_GROUPS = YES + +# The PERL_PATH should be the absolute path and name of the perl script +# interpreter (i.e. the result of `which perl'). + +PERL_PATH = @PERL@ + +#--------------------------------------------------------------------------- +# Configuration options related to the dot tool +#--------------------------------------------------------------------------- + +# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will +# generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base +# or super classes. Setting the tag to NO turns the diagrams off. Note that +# this option is superseded by the HAVE_DOT option below. This is only a +# fallback. It is recommended to install and use dot, since it yields more +# powerful graphs. + +CLASS_DIAGRAMS = NO + +# You can define message sequence charts within doxygen comments using the \msc +# command. Doxygen will then run the mscgen tool (see http://www.mcternan.me.uk/mscgen/) to +# produce the chart and insert it in the documentation. The MSCGEN_PATH tag allows you to +# specify the directory where the mscgen tool resides. If left empty the tool is assumed to +# be found in the default search path. 
+ +MSCGEN_PATH = + +# If set to YES, the inheritance and collaboration graphs will hide +# inheritance and usage relations if the target is undocumented +# or is not a class. + +HIDE_UNDOC_RELATIONS = YES + +# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is +# available from the path. This tool is part of Graphviz, a graph visualization +# toolkit from AT&T and Lucent Bell Labs. The other options in this section +# have no effect if this option is set to NO (the default) + +HAVE_DOT = YES + +# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen +# will generate a graph for each documented class showing the direct and +# indirect inheritance relations. Setting this tag to YES will force the +# CLASS_DIAGRAMS tag to NO. + +CLASS_GRAPH = YES + +# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen +# will generate a graph for each documented class showing the direct and +# indirect implementation dependencies (inheritance, containment, and +# class references variables) of the class with other documented classes. + +COLLABORATION_GRAPH = YES + +# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen +# will generate a graph for groups, showing the direct groups dependencies + +GROUP_GRAPHS = YES + +# If the UML_LOOK tag is set to YES doxygen will generate inheritance and +# collaboration diagrams in a style similar to the OMG's Unified Modeling +# Language. + +UML_LOOK = NO + +# If set to YES, the inheritance and collaboration graphs will show the +# relations between templates and their instances. + +TEMPLATE_RELATIONS = YES + +# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT +# tags are set to YES then doxygen will generate a graph for each documented +# file showing the direct and indirect include dependencies of the file with +# other documented files. 
+ +INCLUDE_GRAPH = YES + +# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and +# HAVE_DOT tags are set to YES then doxygen will generate a graph for each +# documented header file showing the documented files that directly or +# indirectly include this file. + +INCLUDED_BY_GRAPH = YES + +# If the CALL_GRAPH and HAVE_DOT tags are set to YES then doxygen will +# generate a call dependency graph for every global function or class method. +# Note that enabling this option will significantly increase the time of a run. +# So in most cases it will be better to enable call graphs for selected +# functions only using the \callgraph command. + +CALL_GRAPH = NO + +# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then doxygen will +# generate a caller dependency graph for every global function or class method. +# Note that enabling this option will significantly increase the time of a run. +# So in most cases it will be better to enable caller graphs for selected +# functions only using the \callergraph command. + +CALLER_GRAPH = NO + +# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen +# will show a graphical hierarchy of all classes instead of a textual one. + +GRAPHICAL_HIERARCHY = YES + +# If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES +# then doxygen will show the dependencies a directory has on other directories +# in a graphical way. The dependency relations are determined by the #include +# relations between the files in the directories. + +DIRECTORY_GRAPH = YES + +# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images +# generated by dot. Possible values are png, jpg, or gif +# If left blank png will be used. + +DOT_IMAGE_FORMAT = png + +# The tag DOT_PATH can be used to specify the path where the dot tool can be +# found. If left blank, it is assumed the dot tool can be found in the path. 
+ +DOT_PATH = + +# The DOTFILE_DIRS tag can be used to specify one or more directories that +# contain dot files that are included in the documentation (see the +# \dotfile command). + +DOTFILE_DIRS = @abs_top_srcdir@/doc/dot + +# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of +# nodes that will be shown in the graph. If the number of nodes in a graph +# becomes larger than this value, doxygen will truncate the graph, which is +# visualized by representing a node as a red box. Note that doxygen will always +# show the root nodes and its direct children regardless of this setting. + +DOT_GRAPH_MAX_NODES = 100 + +# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent +# background. This is disabled by default, which results in a white background. +# Warning: Depending on the platform used, enabling this option may lead to +# badly anti-aliased labels on the edges of a graph (i.e. they become hard to +# read). + +DOT_TRANSPARENT = NO + +# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output +# files in one run (i.e. multiple -o and -T options on the command line). This +# makes dot run faster, but since only newer versions of dot (>1.8.10) +# support this, this feature is disabled by default. + +DOT_MULTI_TARGETS = YES + +# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will +# generate a legend page explaining the meaning of the various boxes and +# arrows in the dot generated graphs. + +GENERATE_LEGEND = YES + +# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will +# remove the intermediate dot files that are used to generate +# the various graphs. + +DOT_CLEANUP = YES + +#--------------------------------------------------------------------------- +# Configuration::additions related to the search engine +#--------------------------------------------------------------------------- + +# The SEARCHENGINE tag specifies whether or not a search engine should be +# used. 
If set to NO the values of all tags below this one will be ignored. + +SEARCHENGINE = NO diff --git a/doc/Makefile.am b/doc/Makefile.am new file mode 100644 index 00000000..966d6018 --- /dev/null +++ b/doc/Makefile.am @@ -0,0 +1,147 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +trafficshelldir = $(pkgdocdir)/trafficshell + +.PHONY: help dirhtml singlehtml epub latex man changes linkcheck doxygen + +dist_trafficshell_DATA = \ + man/show_alarms.1 \ + man/show_cache.1 \ + man/show_cache-stats.1 \ + man/show_cluster.1 \ + man/show_dns-resolver.1 \ + man/show_dns-stats.1 \ + man/show_hostdb.1 \ + man/show_hostdb-stats.1 \ + man/show_http.1 \ + man/show_http-stats.1 \ + man/show_http-trans-stats.1 \ + man/show_icp.1 \ + man/show_icp-stats.1 \ + man/show_logging.1 \ + man/show_logging-stats.1 \ + man/show_network.1 \ + man/show_parent.1 \ + man/show_port-tunnels.1 \ + man/show_proxy.1 \ + man/show_proxy-stats.1 \ + man/show_remap.1 \ + man/show_scheduled-update.1 \ + man/show_security.1 \ + man/show_socks.1 \ + man/show_ssl.1 \ + man/show_status.1 \ + man/show_version.1 \ + man/show_virtual-ip.1 + +if BUILD_MANPAGES + +man1_MANS = $(TS_MAN1_MANPAGES) +man3_MANS = $(TS_MAN3_MANPAGES) +man5_MANS = $(TS_MAN5_MANPAGES) +man8_MANS = $(TS_MAN8_MANPAGES) + +$(man1_MANS) $(man3_MANS) $(man5_MANS) $(man8_MANS): man + +# Hook the 'all' target so that the man pages get generated in the "all" target, prior +# to "make install". If we leave it to "make install" time, then the man pages are likely +# to be generated as root. +all-am: $(man1_MANS) $(man3_MANS) $(man5_MANS) $(man8_MANS) + +endif + +EXTRA_DIST = \ + Doxyfile.in \ + $(dist_trafficshell_DATA) + +clean-local: + -rm -rf html warn.log + -rm -rf $(BUILDDIR)/doctrees $(BUILDDIR)/html $(BUILDDIR)/dirhtml $(BUILDDIR)/singlehtml $(BUILDDIR)/epub $(BUILDDIR)/latex $(BUILDDIR)/man + +doxygen: Doxyfile + $(DOXYGEN) + +# Makefile for Sphinx documentation +# + +# You can set these variables from the command line. +PAPER = letter +BUILDDIR = docbuild + +# Internal variables. 
+PAPEROPT_a4 = -D latex_paper_size=a4 +PAPEROPT_letter = -D latex_paper_size=letter +ALLSPHINXOPTS = $(SPHINXOPTS) +# the i18n builder cannot share the environment and doctrees with the others +I18NSPHINXOPTS = $(SPHINXOPTS) + +# The PAPER setting variables requires recursive make variable expansion, which automake +# detects as non-portable. We bounce this through a shell script and do the expansion there. +SBUILD = PAPEROPT_a4="$(PAPEROPT_a4)" PAPEROPT_letter="$(PAPEROPT_letter)" PAPER="$(PAPER)" \ + $(srcdir)/sbuild $(SPHINXBUILD) \ + -D version=$(TS_VERSION_STRING) \ + -D release=$(TS_VERSION_STRING) \ + -c $(srcdir) \ + $(ALLSPHINXOPTS) + +help: + @echo "Please use \`make <target>' where <target> is one of" + @echo " html to make standalone HTML files" + @echo " dirhtml to make HTML files named index.html in directories" + @echo " singlehtml to make a single large HTML file" + @echo " epub to make an epub" + @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" + @echo " latexpdf to make LaTeX files and run them through pdflatex" + @echo " man to make manual pages" + @echo " changes to make an overview of all changed/added/deprecated items" + @echo " linkcheck to check all external links for integrity" + +html-local: + $(SBUILD) -d $(BUILDDIR)/doctrees -b html $(srcdir) $(BUILDDIR)/html + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." + +dirhtml: + $(SBUILD) -d $(BUILDDIR)/doctrees -b dirhtml $(srcdir) $(BUILDDIR)/dirhtml + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." + +singlehtml: + $(SBUILD) -d $(BUILDDIR)/doctrees -b singlehtml $(srcdir) $(BUILDDIR)/singlehtml + @echo + @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." + +epub: + $(SBUILD) -d $(BUILDDIR)/doctrees -b epub $(srcdir) $(BUILDDIR)/epub + @echo + @echo "Build finished. The epub file is in $(BUILDDIR)/epub." + +latex: + $(SBUILD) -d $(BUILDDIR)/doctrees -b latex $(srcdir) $(BUILDDIR)/latex + @echo + @echo "Build finished. 
The LaTeX files are in $(BUILDDIR)/latex." + +man: + $(SBUILD) -d $(BUILDDIR)/doctrees -b man $(srcdir) $(BUILDDIR)/man + @echo + @echo "Build finished. The man pages are in $(BUILDDIR)/man." + +changes: + @echo FIXME + +linkcheck: + @echo FIXME diff --git a/doc/Makefile.in b/doc/Makefile.in new file mode 100644 index 00000000..c7a4e53b --- /dev/null +++ b/doc/Makefile.in @@ -0,0 +1,1048 @@ +# Makefile.in generated by automake 1.13.4 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2013 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements.  See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership.  The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License.  You may obtain a copy of the License at +# +#      http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +VPATH = @srcdir@ +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' +am__make_running_with_option = \ + case $${target_option-} in \ + ?) 
;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgincludedir = $(includedir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = doc +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ + $(srcdir)/Doxyfile.in $(dist_trafficshell_DATA) +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/build/ax_boost_base.m4 \ + $(top_srcdir)/build/ax_check_openssl.m4 \ + $(top_srcdir)/build/ax_compare_version.m4 \ + 
$(top_srcdir)/build/ax_compiler_vendor.m4 \ + $(top_srcdir)/build/ax_cxx_compile_stdcxx_11.m4 \ + $(top_srcdir)/build/ax_lib_curl.m4 \ + $(top_srcdir)/build/ax_lib_readline.m4 \ + $(top_srcdir)/build/ax_path_generic.m4 \ + $(top_srcdir)/build/ax_perl_modules.m4 \ + $(top_srcdir)/build/ax_with_curses.m4 \ + $(top_srcdir)/build/common.m4 $(top_srcdir)/build/crypto.m4 \ + $(top_srcdir)/build/jemalloc.m4 $(top_srcdir)/build/libtool.m4 \ + $(top_srcdir)/build/ltoptions.m4 \ + $(top_srcdir)/build/ltsugar.m4 \ + $(top_srcdir)/build/ltversion.m4 \ + $(top_srcdir)/build/lt~obsolete.m4 $(top_srcdir)/build/lzma.m4 \ + $(top_srcdir)/build/network.m4 $(top_srcdir)/build/pcre.m4 \ + $(top_srcdir)/build/pkg.m4 $(top_srcdir)/build/tcl.m4 \ + $(top_srcdir)/build/tcmalloc.m4 $(top_srcdir)/build/xml.m4 \ + $(top_srcdir)/build/zlib.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/lib/ts/ink_autoconf.h +CONFIG_CLEAN_FILES = Doxyfile +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; +am__vpath_adj = case $$p in \ + $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ + *) f=$$p;; \ + esac; +am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; +am__install_max = 40 +am__nobase_strip_setup = \ + srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` +am__nobase_strip = \ + for p in $$list; do echo "$$p"; done | sed 
-e "s|$$srcdirstrip/||" +am__nobase_list = $(am__nobase_strip_setup); \ + for p in $$list; do echo "$$p $$p"; done | \ + sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ + $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ + if (++n[$$2] == $(am__install_max)) \ + { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ + END { for (dir in files) print dir, files[dir] }' +am__base_list = \ + sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ + sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' +am__uninstall_files_from_dir = { \ + test -z "$$files" \ + || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ + || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ + $(am__cd) "$$dir" && rm -f $$files; }; \ + } +man1dir = $(mandir)/man1 +man3dir = $(mandir)/man3 +man5dir = $(mandir)/man5 +man8dir = $(mandir)/man8 +NROFF = nroff +MANS = $(man1_MANS) $(man3_MANS) $(man5_MANS) $(man8_MANS) +am__installdirs = "$(DESTDIR)$(trafficshelldir)" +DATA = $(dist_trafficshell_DATA) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +pkgdatadir = @pkgdatadir@ +pkglibdir = @pkglibdir@ +pkglibexecdir = @pkglibexecdir@ +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_CFLAGS = @AM_CFLAGS@ +AM_CXXFLAGS = @AM_CXXFLAGS@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +API_DEFS = @API_DEFS@ +AR = @AR@ +ASCPP = @ASCPP@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +BOOST_CPPFLAGS = @BOOST_CPPFLAGS@ +BOOST_LDFLAGS = @BOOST_LDFLAGS@ +CC = @CC@ +CCACHE = @CCACHE@ +CCAS = @CCAS@ +CCASDEPMODE = @CCASDEPMODE@ +CCASFLAGS = @CCASFLAGS@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CURL_CFLAGS = @CURL_CFLAGS@ +CURL_CONFIG = @CURL_CONFIG@ +CURL_LIBS = @CURL_LIBS@ +CURSES_LIB = @CURSES_LIB@ +CXX = @CXX@ +CXXCPP = @CXXCPP@ +CXXDEPMODE = @CXXDEPMODE@ +CXXFLAGS = @CXXFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ 
+DEPDIR = @DEPDIR@ +DLLTOOL = @DLLTOOL@ +DOXYGEN = @DOXYGEN@ +DSYMUTIL = @DSYMUTIL@ +DUMPBIN = @DUMPBIN@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +EXPAT_LDFLAGS = @EXPAT_LDFLAGS@ +EXTRA_CC_LDFLAGS = @EXTRA_CC_LDFLAGS@ +EXTRA_CXX_LDFLAGS = @EXTRA_CXX_LDFLAGS@ +FGREP = @FGREP@ +FLEX_CFLAGS = @FLEX_CFLAGS@ +GEOIP_LIBS = @GEOIP_LIBS@ +GREP = @GREP@ +HOST_GUESS = @HOST_GUESS@ +HWLOC_CFLAGS = @HWLOC_CFLAGS@ +HWLOC_LIBS = @HWLOC_LIBS@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +LD = @LD@ +LDFLAGS = @LDFLAGS@ +LEX = @LEX@ +LEXLIB = @LEXLIB@ +LEX_OUTPUT_ROOT = @LEX_OUTPUT_ROOT@ +LIBCAP = @LIBCAP@ +LIBCRYPT = @LIBCRYPT@ +LIBDEMANGLE = @LIBDEMANGLE@ +LIBEXPAT = @LIBEXPAT@ +LIBLZMA = @LIBLZMA@ +LIBOBJS = @LIBOBJS@ +LIBPCRE = @LIBPCRE@ +LIBPROFILER = @LIBPROFILER@ +LIBREADLINE = @LIBREADLINE@ +LIBRESOLV = @LIBRESOLV@ +LIBS = @LIBS@ +LIBTCL = @LIBTCL@ +LIBTOOL = @LIBTOOL@ +LIBTOOL_LINK_FLAGS = @LIBTOOL_LINK_FLAGS@ +LIBZ = @LIBZ@ +LIPO = @LIPO@ +LN_S = @LN_S@ +LTLIBOBJS = @LTLIBOBJS@ +LUA_LDFLAGS = @LUA_LDFLAGS@ +MAKEINFO = @MAKEINFO@ +MANIFEST_TOOL = @MANIFEST_TOOL@ +MGMT_DEFS = @MGMT_DEFS@ +MKDIR_P = @MKDIR_P@ +NM = @NM@ +NMEDIT = @NMEDIT@ +OBJDUMP = @OBJDUMP@ +OBJEXT = @OBJEXT@ +OPENSSL_INCLUDES = @OPENSSL_INCLUDES@ +OPENSSL_LDFLAGS = @OPENSSL_LDFLAGS@ +OPENSSL_LIBS = @OPENSSL_LIBS@ +OTOOL = @OTOOL@ +OTOOL64 = @OTOOL64@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PCRE_CONFIG = @PCRE_CONFIG@ +PERL = @PERL@ +PKG_CONFIG = @PKG_CONFIG@ +PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ +PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ +PYTHON = @PYTHON@ +PYTHON_EXEC_PREFIX = @PYTHON_EXEC_PREFIX@ 
+PYTHON_PLATFORM = @PYTHON_PLATFORM@ +PYTHON_PREFIX = @PYTHON_PREFIX@ +PYTHON_VERSION = @PYTHON_VERSION@ +RANLIB = @RANLIB@ +RM = @RM@ +SED = @SED@ +SET_MAKE = @SET_MAKE@ +SHARED_CFLAGS = @SHARED_CFLAGS@ +SHARED_CXXFLAGS = @SHARED_CXXFLAGS@ +SHARED_CXXLINKFLAGS = @SHARED_CXXLINKFLAGS@ +SHARED_LDFLAGS = @SHARED_LDFLAGS@ +SHELL = @SHELL@ +SPDYLAY_CFLAGS = @SPDYLAY_CFLAGS@ +SPDYLAY_LIBS = @SPDYLAY_LIBS@ +SPHINXBUILD = @SPHINXBUILD@ +SPHINXOPTS = @SPHINXOPTS@ +STRIP = @STRIP@ +TCL_BIN_DIR = @TCL_BIN_DIR@ +TCL_LIB_FILE = @TCL_LIB_FILE@ +TCL_LIB_FLAG = @TCL_LIB_FLAG@ +TCL_LIB_SPEC = @TCL_LIB_SPEC@ +TCL_PATCH_LEVEL = @TCL_PATCH_LEVEL@ +TCL_SRC_DIR = @TCL_SRC_DIR@ +TCL_STUB_LIB_FILE = @TCL_STUB_LIB_FILE@ +TCL_STUB_LIB_FLAG = @TCL_STUB_LIB_FLAG@ +TCL_STUB_LIB_SPEC = @TCL_STUB_LIB_SPEC@ +TCL_VERSION = @TCL_VERSION@ +TFW_PACKAGE_SUFFIX = @TFW_PACKAGE_SUFFIX@ +TS_LIBTOOL_VERSION = @TS_LIBTOOL_VERSION@ +TS_LUAJIT_LDFLAGS = @TS_LUAJIT_LDFLAGS@ +TS_MAN1_MANPAGES = @TS_MAN1_MANPAGES@ +TS_MAN3_MANPAGES = @TS_MAN3_MANPAGES@ +TS_MAN5_MANPAGES = @TS_MAN5_MANPAGES@ +TS_MAN8_MANPAGES = @TS_MAN8_MANPAGES@ +TS_VERSION_MAJOR = @TS_VERSION_MAJOR@ +TS_VERSION_MICRO = @TS_VERSION_MICRO@ +TS_VERSION_MINOR = @TS_VERSION_MINOR@ +TS_VERSION_NUMBER = @TS_VERSION_NUMBER@ +TS_VERSION_STRING = @TS_VERSION_STRING@ +VERSION = @VERSION@ +YACC = @YACC@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_AR = @ac_ct_AR@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_CXX = @ac_ct_CXX@ +ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ +ac_cv_sizeof_voidp = @ac_cv_sizeof_voidp@ +allocah = @allocah@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +arpa_ineth = @arpa_ineth@ +arpa_nameser_compath = @arpa_nameser_compath@ +arpa_nameserh = @arpa_nameserh@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_group = @build_group@ 
+build_machine = @build_machine@ +build_os = @build_os@ +build_person = @build_person@ +build_vendor = @build_vendor@ +builddir = @builddir@ +cachedir = @cachedir@ +cpioh = @cpioh@ +ctypeh = @ctypeh@ +datadir = @datadir@ +datarootdir = @datarootdir@ +default_loopback_iface = @default_loopback_iface@ +defer_accept = @defer_accept@ +docdir = @docdir@ +dvidir = @dvidir@ +enable_remote_cov_commit = @enable_remote_cov_commit@ +endianh = @endianh@ +exec_prefix = @exec_prefix@ +execinfoh = @execinfoh@ +exp_bindir = @exp_bindir@ +exp_cachedir = @exp_cachedir@ +exp_datadir = @exp_datadir@ +exp_docdir = @exp_docdir@ +exp_exec_prefix = @exp_exec_prefix@ +exp_includedir = @exp_includedir@ +exp_infodir = @exp_infodir@ +exp_installbuilddir = @exp_installbuilddir@ +exp_libdir = @exp_libdir@ +exp_libexecdir = @exp_libexecdir@ +exp_localstatedir = @exp_localstatedir@ +exp_logdir = @exp_logdir@ +exp_mandir = @exp_mandir@ +exp_prefix = @exp_prefix@ +exp_runtimedir = @exp_runtimedir@ +exp_sbindir = @exp_sbindir@ +exp_sysconfdir = @exp_sysconfdir@ +expath = @expath@ +floath = @floath@ +gethostbyname_r_glibc2 = @gethostbyname_r_glibc2@ +has_128bit_cas = @has_128bit_cas@ +has_backtrace = @has_backtrace@ +has_in6_is_addr_unspecified = @has_in6_is_addr_unspecified@ +has_interim_cache = @has_interim_cache@ +has_ip_tos = @has_ip_tos@ +has_profiler = @has_profiler@ +has_so_mark = @has_so_mark@ +has_spdy = @has_spdy@ +has_tcmalloc = @has_tcmalloc@ +has_tests = @has_tests@ +has_wccp = @has_wccp@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +ifaddrsh = @ifaddrsh@ +includedir = @includedir@ +infodir = @infodir@ +ink_with_modules_local = @ink_with_modules_local@ +ink_with_modules_process = @ink_with_modules_process@ +install_sh = @install_sh@ +installbuilddir = @installbuilddir@ +iocore_include_dirs = @iocore_include_dirs@ +ip_transparent = @ip_transparent@ +jemalloch = @jemalloch@ +libdir = @libdir@ 
+libexecdir = @libexecdir@ +libgenh = @libgenh@ +localedir = @localedir@ +localstatedir = @localstatedir@ +logdir = @logdir@ +lzmah = @lzmah@ +machine_endianh = @machine_endianh@ +malloch = @malloch@ +mandir = @mandir@ +mathh = @mathh@ +max_api_stats = @max_api_stats@ +max_event_threads = @max_event_threads@ +max_host_name_len = @max_host_name_len@ +max_threads_per_type = @max_threads_per_type@ +mkdir_p = @mkdir_p@ +net_ppp_defsh = @net_ppp_defsh@ +netdbh = @netdbh@ +netinet_in_systmh = @netinet_in_systmh@ +netinet_inh = @netinet_inh@ +netinet_ip_icmph = @netinet_ip_icmph@ +netinet_iph = @netinet_iph@ +netinet_tcph = @netinet_tcph@ +oldincludedir = @oldincludedir@ +pcre_pcreh = @pcre_pcreh@ +pcreh = @pcreh@ +pdfdir = @pdfdir@ +pkgbindir = @pkgbindir@ +pkgcachedir = @pkgcachedir@ +pkgdocdir = @pkgdocdir@ +pkglocalstatedir = @pkglocalstatedir@ +pkglogdir = @pkglogdir@ +pkgpyexecdir = @pkgpyexecdir@ +pkgpythondir = @pkgpythondir@ +pkgruntimedir = @pkgruntimedir@ +pkgsbindir = @pkgsbindir@ +pkgsysconfdir = @pkgsysconfdir@ +pkgsysgroup = @pkgsysgroup@ +pkgsysuser = @pkgsysuser@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +pthread_nph = @pthread_nph@ +pyexecdir = @pyexecdir@ +pythondir = @pythondir@ +readline_readlineh = @readline_readlineh@ +rel_bindir = @rel_bindir@ +rel_cachedir = @rel_cachedir@ +rel_datadir = @rel_datadir@ +rel_docdir = @rel_docdir@ +rel_exec_prefix = @rel_exec_prefix@ +rel_includedir = @rel_includedir@ +rel_infodir = @rel_infodir@ +rel_installbuilddir = @rel_installbuilddir@ +rel_libdir = @rel_libdir@ +rel_libexecdir = @rel_libexecdir@ +rel_localstatedir = @rel_localstatedir@ +rel_logdir = @rel_logdir@ +rel_mandir = @rel_mandir@ +rel_prefix = @rel_prefix@ +rel_runtimedir = @rel_runtimedir@ +rel_sbindir = @rel_sbindir@ +rel_sysconfdir = @rel_sysconfdir@ +runtimedir = @runtimedir@ +sbindir = @sbindir@ +schedh = @schedh@ +sharedstatedir = @sharedstatedir@ +siginfoh = @siginfoh@ +srcdir = @srcdir@ +stroptsh = 
@stroptsh@ +sys_byteorderh = @sys_byteorderh@ +sys_cpuseth = @sys_cpuseth@ +sys_epollh = @sys_epollh@ +sys_eventh = @sys_eventh@ +sys_ioctlh = @sys_ioctlh@ +sys_mounth = @sys_mounth@ +sys_paramh = @sys_paramh@ +sys_pseth = @sys_pseth@ +sys_sockioh = @sys_sockioh@ +sys_sysctlh = @sys_sysctlh@ +sys_sysinfoh = @sys_sysinfoh@ +sys_sysmacrosh = @sys_sysmacrosh@ +sys_systeminfoh = @sys_systeminfoh@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +use_diags = @use_diags@ +use_epoll = @use_epoll@ +use_fast_sdk = @use_fast_sdk@ +use_freelist = @use_freelist@ +use_hwloc = @use_hwloc@ +use_kqueue = @use_kqueue@ +use_linux_native_aio = @use_linux_native_aio@ +use_port = @use_port@ +use_posix_cap = @use_posix_cap@ +use_reclaimable_freelist = @use_reclaimable_freelist@ +use_tls_alpn = @use_tls_alpn@ +use_tls_eckey = @use_tls_eckey@ +use_tls_npn = @use_tls_npn@ +use_tls_sni = @use_tls_sni@ +use_tproxy = @use_tproxy@ +valuesh = @valuesh@ +waith = @waith@ +zlibh = @zlibh@ +trafficshelldir = $(pkgdocdir)/trafficshell +dist_trafficshell_DATA = \ + man/show_alarms.1 \ + man/show_cache.1 \ + man/show_cache-stats.1 \ + man/show_cluster.1 \ + man/show_dns-resolver.1 \ + man/show_dns-stats.1 \ + man/show_hostdb.1 \ + man/show_hostdb-stats.1 \ + man/show_http.1 \ + man/show_http-stats.1 \ + man/show_http-trans-stats.1 \ + man/show_icp.1 \ + man/show_icp-stats.1 \ + man/show_logging.1 \ + man/show_logging-stats.1 \ + man/show_network.1 \ + man/show_parent.1 \ + man/show_port-tunnels.1 \ + man/show_proxy.1 \ + man/show_proxy-stats.1 \ + man/show_remap.1 \ + man/show_scheduled-update.1 \ + man/show_security.1 \ + man/show_socks.1 \ + man/show_ssl.1 \ + man/show_status.1 \ + man/show_version.1 \ + man/show_virtual-ip.1 + +@BUILD_MANPAGES_TRUE@man1_MANS = $(TS_MAN1_MANPAGES) +@BUILD_MANPAGES_TRUE@man3_MANS = $(TS_MAN3_MANPAGES) +@BUILD_MANPAGES_TRUE@man5_MANS = $(TS_MAN5_MANPAGES) 
+@BUILD_MANPAGES_TRUE@man8_MANS = $(TS_MAN8_MANPAGES) +EXTRA_DIST = \ + Doxyfile.in \ + $(dist_trafficshell_DATA) + + +# Makefile for Sphinx documentation +# + +# You can set these variables from the command line. +PAPER = letter +BUILDDIR = docbuild + +# Internal variables. +PAPEROPT_a4 = -D latex_paper_size=a4 +PAPEROPT_letter = -D latex_paper_size=letter +ALLSPHINXOPTS = $(SPHINXOPTS) +# the i18n builder cannot share the environment and doctrees with the others +I18NSPHINXOPTS = $(SPHINXOPTS) + +# The PAPER setting variables requires recursive make variable expansion, which automake +# detects as non-portable. We bounce this through a shell script and do the expansion there. +SBUILD = PAPEROPT_a4="$(PAPEROPT_a4)" PAPEROPT_letter="$(PAPEROPT_letter)" PAPER="$(PAPER)" \ + $(srcdir)/sbuild $(SPHINXBUILD) \ + -D version=$(TS_VERSION_STRING) \ + -D release=$(TS_VERSION_STRING) \ + -c $(srcdir) \ + $(ALLSPHINXOPTS) + +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign doc/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --foreign doc/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +Doxyfile: $(top_builddir)/config.status $(srcdir)/Doxyfile.in + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs + +install-man1: $(man1_MANS) + @$(NORMAL_INSTALL) + @list1='$(man1_MANS)'; \ + list2=''; \ + test -n "$(man1dir)" \ + && test -n "`echo $$list1$$list2`" \ + || exit 0; \ + echo " $(MKDIR_P) '$(DESTDIR)$(man1dir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(man1dir)" || exit 1; \ + { for i in $$list1; do echo "$$i"; done; \ + if test -n "$$list2"; then \ + for i in $$list2; do echo "$$i"; done \ + | sed -n '/\.1[a-z]*$$/p'; \ + fi; \ + } | while read p; do \ + if test -f $$p; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; echo "$$p"; \ + done | \ + sed -e 'n;s,.*/,,;p;h;s,.*\.,,;s,^[^1][0-9a-z]*$$,1,;x' \ + -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,' | \ + sed 'N;N;s,\n, ,g' | { \ + list=; while read file base inst; do \ + if test "$$base" = "$$inst"; then list="$$list $$file"; else \ + echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man1dir)/$$inst'"; \ + $(INSTALL_DATA) "$$file" "$(DESTDIR)$(man1dir)/$$inst" || exit $$?; \ + fi; \ + done; \ + for i in $$list; do echo "$$i"; done | $(am__base_list) | \ + while read files; do \ + test -z "$$files" || { \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(man1dir)'"; \ + 
$(INSTALL_DATA) $$files "$(DESTDIR)$(man1dir)" || exit $$?; }; \ + done; } + +uninstall-man1: + @$(NORMAL_UNINSTALL) + @list='$(man1_MANS)'; test -n "$(man1dir)" || exit 0; \ + files=`{ for i in $$list; do echo "$$i"; done; \ + } | sed -e 's,.*/,,;h;s,.*\.,,;s,^[^1][0-9a-z]*$$,1,;x' \ + -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,'`; \ + dir='$(DESTDIR)$(man1dir)'; $(am__uninstall_files_from_dir) + +install-man3: $(man3_MANS) + @$(NORMAL_INSTALL) + @list1='$(man3_MANS)'; \ + list2=''; \ + test -n "$(man3dir)" \ + && test -n "`echo $$list1$$list2`" \ + || exit 0; \ + echo " $(MKDIR_P) '$(DESTDIR)$(man3dir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(man3dir)" || exit 1; \ + { for i in $$list1; do echo "$$i"; done; \ + if test -n "$$list2"; then \ + for i in $$list2; do echo "$$i"; done \ + | sed -n '/\.3[a-z]*$$/p'; \ + fi; \ + } | while read p; do \ + if test -f $$p; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; echo "$$p"; \ + done | \ + sed -e 'n;s,.*/,,;p;h;s,.*\.,,;s,^[^3][0-9a-z]*$$,3,;x' \ + -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,' | \ + sed 'N;N;s,\n, ,g' | { \ + list=; while read file base inst; do \ + if test "$$base" = "$$inst"; then list="$$list $$file"; else \ + echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man3dir)/$$inst'"; \ + $(INSTALL_DATA) "$$file" "$(DESTDIR)$(man3dir)/$$inst" || exit $$?; \ + fi; \ + done; \ + for i in $$list; do echo "$$i"; done | $(am__base_list) | \ + while read files; do \ + test -z "$$files" || { \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(man3dir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(man3dir)" || exit $$?; }; \ + done; } + +uninstall-man3: + @$(NORMAL_UNINSTALL) + @list='$(man3_MANS)'; test -n "$(man3dir)" || exit 0; \ + files=`{ for i in $$list; do echo "$$i"; done; \ + } | sed -e 's,.*/,,;h;s,.*\.,,;s,^[^3][0-9a-z]*$$,3,;x' \ + -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,'`; \ + dir='$(DESTDIR)$(man3dir)'; $(am__uninstall_files_from_dir) + +install-man5: $(man5_MANS) + @$(NORMAL_INSTALL) + @list1='$(man5_MANS)'; 
\ + list2=''; \ + test -n "$(man5dir)" \ + && test -n "`echo $$list1$$list2`" \ + || exit 0; \ + echo " $(MKDIR_P) '$(DESTDIR)$(man5dir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(man5dir)" || exit 1; \ + { for i in $$list1; do echo "$$i"; done; \ + if test -n "$$list2"; then \ + for i in $$list2; do echo "$$i"; done \ + | sed -n '/\.5[a-z]*$$/p'; \ + fi; \ + } | while read p; do \ + if test -f $$p; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; echo "$$p"; \ + done | \ + sed -e 'n;s,.*/,,;p;h;s,.*\.,,;s,^[^5][0-9a-z]*$$,5,;x' \ + -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,' | \ + sed 'N;N;s,\n, ,g' | { \ + list=; while read file base inst; do \ + if test "$$base" = "$$inst"; then list="$$list $$file"; else \ + echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man5dir)/$$inst'"; \ + $(INSTALL_DATA) "$$file" "$(DESTDIR)$(man5dir)/$$inst" || exit $$?; \ + fi; \ + done; \ + for i in $$list; do echo "$$i"; done | $(am__base_list) | \ + while read files; do \ + test -z "$$files" || { \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(man5dir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(man5dir)" || exit $$?; }; \ + done; } + +uninstall-man5: + @$(NORMAL_UNINSTALL) + @list='$(man5_MANS)'; test -n "$(man5dir)" || exit 0; \ + files=`{ for i in $$list; do echo "$$i"; done; \ + } | sed -e 's,.*/,,;h;s,.*\.,,;s,^[^5][0-9a-z]*$$,5,;x' \ + -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,'`; \ + dir='$(DESTDIR)$(man5dir)'; $(am__uninstall_files_from_dir) + +install-man8: $(man8_MANS) + @$(NORMAL_INSTALL) + @list1='$(man8_MANS)'; \ + list2=''; \ + test -n "$(man8dir)" \ + && test -n "`echo $$list1$$list2`" \ + || exit 0; \ + echo " $(MKDIR_P) '$(DESTDIR)$(man8dir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(man8dir)" || exit 1; \ + { for i in $$list1; do echo "$$i"; done; \ + if test -n "$$list2"; then \ + for i in $$list2; do echo "$$i"; done \ + | sed -n '/\.8[a-z]*$$/p'; \ + fi; \ + } | while read p; do \ + if test -f $$p; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; echo "$$p"; \ + done | \ + sed 
-e 'n;s,.*/,,;p;h;s,.*\.,,;s,^[^8][0-9a-z]*$$,8,;x' \ + -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,' | \ + sed 'N;N;s,\n, ,g' | { \ + list=; while read file base inst; do \ + if test "$$base" = "$$inst"; then list="$$list $$file"; else \ + echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man8dir)/$$inst'"; \ + $(INSTALL_DATA) "$$file" "$(DESTDIR)$(man8dir)/$$inst" || exit $$?; \ + fi; \ + done; \ + for i in $$list; do echo "$$i"; done | $(am__base_list) | \ + while read files; do \ + test -z "$$files" || { \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(man8dir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(man8dir)" || exit $$?; }; \ + done; } + +uninstall-man8: + @$(NORMAL_UNINSTALL) + @list='$(man8_MANS)'; test -n "$(man8dir)" || exit 0; \ + files=`{ for i in $$list; do echo "$$i"; done; \ + } | sed -e 's,.*/,,;h;s,.*\.,,;s,^[^8][0-9a-z]*$$,8,;x' \ + -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,'`; \ + dir='$(DESTDIR)$(man8dir)'; $(am__uninstall_files_from_dir) +install-dist_trafficshellDATA: $(dist_trafficshell_DATA) + @$(NORMAL_INSTALL) + @list='$(dist_trafficshell_DATA)'; test -n "$(trafficshelldir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(trafficshelldir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(trafficshelldir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(trafficshelldir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(trafficshelldir)" || exit $$?; \ + done + +uninstall-dist_trafficshellDATA: + @$(NORMAL_UNINSTALL) + @list='$(dist_trafficshell_DATA)'; test -n "$(trafficshelldir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(trafficshelldir)'; $(am__uninstall_files_from_dir) +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ 
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(DATA) +installdirs: + for dir in "$(DESTDIR)$(trafficshelldir)"; do \ + test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + done +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f 
$(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." +clean: clean-am + +clean-am: clean-generic clean-libtool clean-local mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: html-local + +info: info-am + +info-am: + +install-data-am: install-dist_trafficshellDATA + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: install-man1 install-man3 install-man5 install-man8 + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic mostlyclean-libtool + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: uninstall-dist_trafficshellDATA + +uninstall-man: uninstall-man1 uninstall-man3 uninstall-man5 \ + uninstall-man8 + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-generic clean-libtool \ + clean-local cscopelist-am ctags-am distclean distclean-generic \ + distclean-libtool distdir dvi dvi-am html html-am html-local \ + info info-am install install-am install-data install-data-am \ + install-dist_trafficshellDATA install-dvi install-dvi-am \ + install-exec install-exec-am install-html install-html-am \ + install-info install-info-am install-man install-man1 \ + install-man3 install-man5 install-man8 install-pdf \ + install-pdf-am install-ps install-ps-am install-strip \ + installcheck installcheck-am installdirs 
maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic \ + mostlyclean-libtool pdf pdf-am ps ps-am tags-am uninstall \ + uninstall-am uninstall-dist_trafficshellDATA uninstall-man \ + uninstall-man1 uninstall-man3 uninstall-man5 uninstall-man8 + + +.PHONY: help dirhtml singlehtml epub latex man changes linkcheck doxygen + +@BUILD_MANPAGES_TRUE@$(man1_MANS) $(man3_MANS) $(man5_MANS) $(man8_MANS): man + +# Hook the 'all' target so that the man pages get generated in the "all" target, prior +# to "make install". If we leave it to "make install" time, then the man pages are likely +# to be generated as root. +@BUILD_MANPAGES_TRUE@all-am: $(man1_MANS) $(man3_MANS) $(man5_MANS) $(man8_MANS) + +clean-local: + -rm -rf html warn.log + -rm -rf $(BUILDDIR)/doctrees $(BUILDDIR)/html $(BUILDDIR)/dirhtml $(BUILDDIR)/singlehtml $(BUILDDIR)/epub $(BUILDDIR)/latex $(BUILDDIR)/man + +doxygen: Doxyfile + $(DOXYGEN) + +help: + @echo "Please use \`make <target>' where <target> is one of" + @echo " html to make standalone HTML files" + @echo " dirhtml to make HTML files named index.html in directories" + @echo " singlehtml to make a single large HTML file" + @echo " epub to make an epub" + @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" + @echo " latexpdf to make LaTeX files and run them through pdflatex" + @echo " man to make manual pages" + @echo " changes to make an overview of all changed/added/deprecated items" + @echo " linkcheck to check all external links for integrity" + +html-local: + $(SBUILD) -d $(BUILDDIR)/doctrees -b html $(srcdir) $(BUILDDIR)/html + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." + +dirhtml: + $(SBUILD) -d $(BUILDDIR)/doctrees -b dirhtml $(srcdir) $(BUILDDIR)/html + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." + +singlehtml: + $(SBUILD) -d $(BUILDDIR)/doctrees -b singlehtml $(srcdir) $(BUILDDIR)/singlehtml + @echo + @echo "Build finished. 
The HTML page is in $(BUILDDIR)/singlehtml." + +epub: + $(SBUILD) -d $(BUILDDIR)/doctrees -b epub $(srcdir) $(BUILDDIR)/epub + @echo + @echo "Build finished. The epub file is in $(BUILDDIR)/epub." + +latex: + $(SBUILD) -d $(BUILDDIR)/doctrees -b latex $(srcdir) $(BUILDDIR)/latex + @echo + @echo "Build finished. The epub file is in $(BUILDDIR)/latex." + +man: + $(SBUILD) -d $(BUILDDIR)/doctrees -b man $(srcdir) $(BUILDDIR)/man + @echo + @echo "Build finished. The man pages are in $(BUILDDIR)/man." + +changes: + @echo FIXME + +linkcheck: + @echo FIXME + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/doc/admin/cluster-howto.en.rst b/doc/admin/cluster-howto.en.rst new file mode 100644 index 00000000..584b8dd4 --- /dev/null +++ b/doc/admin/cluster-howto.en.rst @@ -0,0 +1,168 @@ +.. _traffic-server-cluster: + +Traffic Server Cluster +********************** + +.. Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, + software distributed under the License is distributed on an + "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied. See the License for the + specific language governing permissions and limitations + under the License. + + + +Traffic Server scales from a single node to multiple nodes that form a +cluster allowing you to improve system performance and reliability. + +.. 
toctree:: + :maxdepth: 1 + +Understanding Traffic Server Clusters +===================================== + +A Traffic Server cluster consists of multiple Traffic Server nodes. The +nodes in a cluster share configuration information and can form a single +logical cache. Traffic Server detects the addition and deletion of nodes +in the cluster automatically and can detect when a node is unavailable. +Traffic Server uses its own protocol for clustering, which is multicast +for node location and heartbeat, but unicast for all data exchange +within the cluster. Traffic Server has two clustering modes: + +- Management-only mode; refer to `Management-Only Clustering`_ below. +- Full-clustering mode; refer to `Full Clustering`_ + +Management-Only Clustering +========================== + +In management-only clustering mode, Traffic Server cluster nodes share +configuration information. You can administer all the nodes at the same +time. Traffic Server uses a multicast management protocol to provide a +single system image of your Traffic Server cluster. Information about +cluster membership, configuration, and exceptions is shared across all +nodes, and the :program:`traffic_manager` process automatically propagates +configuration changes to all the nodes. + +Full Clustering +=============== + +In full-clustering mode, as well as sharing configuration information, a +Traffic Server cluster distributes its cache across its nodes into a +single, virtual object store, rather than replicating the cache node by +node. Traffic Server can provide an enormous aggregate cache size and +can maximize cache hit rate by storing objects only once across the +entire cluster. + +A fully clustered Traffic Server maps objects to specific nodes in the +cluster. When a node receives a request, it checks to see if the request +is a hit somewhere in the cluster. 
If the request is a hit on a +different node, the node handling the request obtains the object from +the hit node and serves it to the client. Traffic Server uses its own +communication protocol to obtain an object from sibling cluster nodes. + +If a node fails or is shut down and removed, Traffic Server removes +references to the missing node on all nodes in the cluster. + +Full clustering recommends a dedicated network interface for cluster +communication to get better performance. + +Enabling Clustering Mode +======================== + +Before you put a node into a cluster, please make sure the following +things are in order: + +- You are using the same operating system on all nodes: + + - Using the same distribution, e.g.: RHEL 5.5 + - Have the same kernel, e.g.: 2.6.18-194.17.1.el5 + - The same architecture, e.g.: ``x86_64`` + +- You have the same version of Traffic Server installed +- The same hardware +- On the same switch or same VLAN. + +Traffic Server does not apply the clustering mode change to all the +nodes in the cluster. You must change the clustering mode on each node +individually. You may follow these instructions: + +1. Setup the same cluster name, with :ts:cv:`proxy.config.proxy_name`, e.g. MyCluster. + +2. Set :ts:cv:`proxy.local.cluster.type` to ``1``, to enable cluster mode. The + following values of this configuration are valid: + +================= ================================================== +Value Description +================= ================================================== +1 full-clustering mode +2 management-only mode +3 no clustering (*default*) +================= ================================================== + +3. Setup a :ts:cv:`proxy.config.cluster.ethernet_interface`, e.g.: ``eth0``. + This should be replaced by your real interface; we recommend a + dedicated interface here. Refer to :ts:cv:`proxy.local.cluster.type` for a full description. + +4. Enable configuration changes:: + + traffic_line -x + +5. 
Restart traffic server:: + + traffic_line -L + + The :program:`traffic_server` and :program:`traffic_manager` processes will need to + restart after the change of :ts:cv:`proxy.local.cluster.type` and + :ts:cv:`proxy.config.cluster.ethernet_interface` have taken place. + +Traffic Server will join the cluster in about 10 seconds, and you can +run :option:`traffic_line -r` `proxy.process.cluster.nodes` to check the hosts +in the cluster, or check out the ``cluster.config`` in the configuration +directory. This configuration is generated by the system, and should not be +edited. It contains a list of the machines that are currently members of the +cluster, for example:: + + # 3 + 127.1.2.3:80 + 127.1.2.4:80 + 127.1.2.5:80 + +After successfully joining of a cluster, all changes of global +configurations on any node, will take effect on **all** nodes. This means you +can make changes on any cluster node member, and they are automatically +distributed to all members. + +Deleting Nodes from a Cluster +============================= + +To delete a node from the Traffic Server cluster, just roll back +:ts:cv:`proxy.local.cluster.type` to the default value 3 and reload. + +Performance tweak for busy Cluster +================================== + +Starting from v3.2.0, Apache Traffic Server can handle multiple internal +cluster connections, and we can tweak the number of Cluster threads. Each +of the thread will keep one connection to all of peering cluster machines. + +Increasing Cluster threads +-------------------------- + +In the cluster environment, the current performance of the cluster threads +will consume the same cpu usage as a normal network thread. It's reasonable +to keep roughly the same number of cluster threads as network threads. For +example, if you are running a system with 10 network processing threads, +you can set the number of cluster threads by modifying +:ts:cv:`proxy.config.cluster.threads` to ``10``. 
E.g.:: + + traffic_line -s proxy.config.cluster.threads -v 10 diff --git a/doc/admin/configuring-cache.en.rst b/doc/admin/configuring-cache.en.rst new file mode 100644 index 00000000..4df5bc04 --- /dev/null +++ b/doc/admin/configuring-cache.en.rst @@ -0,0 +1,389 @@ +.. _configuring-the-cache: + +Configuring the Cache +********************* + +.. Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, + software distributed under the License is distributed on an + "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied. See the License for the + specific language governing permissions and limitations + under the License. + +The Traffic Server cache consists of a high-speed object database called +the **object store** that indexes objects according to URLs and their +associated headers. + +.. toctree:: + :maxdepth: 2 + +The Traffic Server Cache +======================== + +The Traffic Server cache consists of a high-speed object database called +the **object store**. The object store indexes objects according to URLs +and associated headers. This enables Traffic Server to store, retrieve, +and serve not only web pages, but also parts of web pages - which +provides optimum bandwidth savings. Using sophisticated object +management, the object store can cache alternate versions of the same +object (versions may differ because of dissimilar language or encoding +types). It can also efficiently store very small and very large +documents, thereby minimizing wasted space. 
When the cache is full, +Traffic Server removes stale data to ensure the most requested objects +are kept readily available and fresh. + +Traffic Server is designed to tolerate total disk failures on any of the +cache disks. If the disk fails completely, then Traffic Server marks the +entire disk as corrupt and continues using the remaining disks. An alarm +is then created to indicate which disk failed. If all of the cache disks +fail, then Traffic Server goes into proxy-only mode. + +You can perform the following cache configuration tasks: + +- Change the total amount of disk space allocated to the cache: refer + to `Changing Cache Capacity`_. +- Partition the cache by reserving cache disk space for specific + protocols and origin servers/domains: refer to `Partitioning the Cache`_. +- Delete all data in the cache: refer to `Clearing the Cache`_. +- Override cache directives for a requested domain name, regex on a url, + hostname or ip, with extra filters for time, port, method of the request + (and more). ATS can be configured to never cache; always cache; + ignore no-cache directives, etc. These are configured in :file:`cache.config`. + +The RAM Cache +============= + +Traffic Server maintains a small RAM cache of extremely popular objects. +This RAM cache serves the most popular objects as quickly as possible +and reduces load on disks, especially during temporary traffic peaks. +You can configure the RAM cache size to suit your needs, as described in +:ref:`changing-the-size-of-the-ram-cache` below. + +The RAM cache supports two cache eviction algorithms, a regular **LRU** +(*Least Recently Used*) and the more advanced **CLFUS** (*Clocked Least +Frequently Used by Size*, which balances recentness, frequency and size +to maximize hit rate -- similar to a most frequently used algorithm). +The default is to use **CLFUS**, and this is controlled via +:ts:cv:`proxy.config.cache.ram_cache.algorithm`. 
 + +Both the **LRU** and **CLFUS** RAM caches support a configuration to increase +scan resistance. In a typical **LRU**, if you request all possible objects in +sequence, you will effectively churn the cache on every request. The option +:ts:cv:`proxy.config.cache.ram_cache.use_seen_filter` can be set to add some +resistance against this problem. + +In addition, **CLFUS** also supports compressing in the RAM cache itself. +This can be useful for content which is not compressed by itself (e.g. +images). This should not be confused with *Content-Encoding: gzip*; this +feature is only there to save space internally in the RAM cache itself. As +such, it is completely transparent to the User-Agent. The RAM cache +compression is enabled with the option +:ts:cv:`proxy.config.cache.ram_cache.compress`. The default is 0, which means +no compression. Other possible values are 1 for **fastlz**, 2 for **libz** and +3 for **liblzma**. + + +.. _changing-the-size-of-the-ram-cache: + +Changing the Size of the RAM Cache +================================== + +Traffic Server provides a dedicated RAM cache for fast retrieval of +popular small objects. The default RAM cache size is automatically +calculated based on the number and size of the cache partitions you have +configured. If you've partitioned your cache according to protocol +and/or hosts, then the size of the RAM cache for each partition is +proportional to the size of that partition. + +You can increase the RAM cache size for better cache hit performance. +However, if you increase the size of the RAM cache and observe a +decrease in performance (such as increased latencies), then it's +possible that the operating system requires more memory for network +resources. In such instances, you should return the RAM cache size to +its previous value. + +To change the RAM cache size: + +1. Stop Traffic Server. +2. Set the variable :ts:cv:`proxy.config.cache.ram_cache.size` + to specify the size of the RAM cache. 
The default value of -1 means + that the RAM cache is automatically sized at approximately 1MB per + gigabyte of disk. +3. Restart Traffic Server. If you increase the RAM cache to a size of + 1GB or more, then restart with the :program:`trafficserver` command + (refer to :ref:`start-traffic-server`). + +  + +Changing Cache Capacity +======================= + +You can increase or reduce the total amount of disk space allocated to +the cache without clearing the content. To check the size of the cache +(in bytes), enter the command :option:`traffic_line -r` ``proxy.process.cache.bytes_total``. + +Increasing Cache Capacity +------------------------- + +To increase the total amount of disk space allocated to the cache on +existing disks or to add new disks to a Traffic Server node, follow the +steps below: + +1. Stop Traffic Server. +2. Add hardware, if necessary. +3. Edit :file:`storage.config` to increase the amount of disk space allocated + to the cache on existing disks or describe the new hardware you are adding. +4. Restart Traffic Server. + +Reducing Cache Capacity +----------------------- + +To reduce the total amount of disk space allocated to the cache on an +existing disk or to remove disks from a Traffic Server node, follow the +steps below: + +1. Stop Traffic Server. +2. Remove hardware, if necessary. +3. Edit :file:`storage.config` to reduce the amount of disk space allocated + to the cache on existing disks or delete the reference to the hardware you're removing. +4. Restart Traffic Server. + +.. important:: In :file:`storage.config`, a formatted or raw disk must be at least 128 MB. + +.. _partitioning-the-cache: + +Partitioning the Cache +====================== + +You can manage your cache space more efficiently and restrict disk usage +by creating cache volumes with different sizes for specific protocols. +You can further configure these volumes to store data from specific +origin servers and/or domains. 
The volume configuration must be the same +on all nodes in a :ref:`cluster `. + +Creating Cache Partitions for Specific Protocols +------------------------------------------------ + +You can create separate volumes for your cache that vary in size to +store content according to protocol. This ensures that a certain amount +of disk space is always available for a particular protocol. Traffic +Server currently supports the **http** partition type for HTTP objects. + +.. XXX: but not https? + +To partition the cache according to protocol: + +1. Enter a line in the :file:`volume.config` file for + each volume you want to create +2. Restart Traffic Server. + +Making Changes to Partition Sizes and Protocols +----------------------------------------------- + +After you've configured your cache volumes based on protocol, you can +make changes to the configuration at any time. Before making changes, +note the following: + +- You must stop Traffic Server before you change the cache volume size + and protocol assignment. +- When you increase the size of a volume, the contents of the volume + are *not* deleted. However, when you reduce the size of a volume, the + contents of the volume *are* deleted. +- When you change the volume number, the volume is deleted and then + recreated, even if the size and protocol type remain the same. +- When you add new disks to your Traffic Server node, volume sizes + specified in percentages will increase proportionately. +- A lot of changes to volume sizes might result in disk fragmentation, + which affects performance and hit rate. You should clear the cache + before making many changes to cache volume sizes (refer to `Clearing the Cache`_). + +Partitioning the Cache According to Origin Server or Domain +----------------------------------------------------------- + +After you have partitioned the cache according to size and protocol, you +can assign the volumes you created to specific origin servers and/or +domains. 
You can assign a volume to a single origin server or to +multiple origin servers. However, if a volume is assigned to multiple +origin servers, then there is no guarantee on the space available in the +volumes for each origin server. Content is stored in the volumes +according to popularity. In addition to assigning volumes to specific +origin servers and domains, you must assign a generic volume to store +content from all origin servers and domains that are not listed. This +generic volume is also used if the partitions for a particular origin +server or domain become corrupt. If you do not assign a generic volume, +then Traffic Server will run in proxy-only mode. + +.. note:: + + You do *not* need to stop Traffic Server before you assign + volumes to particular hosts or domains. However, this type of + configuration is time-consuming and can cause a spike in memory usage. + Therefore, it's best to configure partition assignment during periods of + low traffic. + +To partition the cache according to hostname and domain: + +1. Configure the cache volumes according to size and protocol, as + described in `Creating Cache Partitions for Specific Protocols`_. +2. Create a separate volume based on protocol for each host and domain, + as well as an additional generic partition to use for content that + does not belong to these origin servers or domains. The volumes do + not need to be the same size. +3. Enter a line in the :file:`hosting.config` file to + allocate the volume(s) used for each origin server and/or domain. +4. Assign a generic volume to use for content that does not belong to + any of the origin servers or domains listed in the file. If all + volumes for a particular origin server become corrupt, then Traffic + Server will also use the generic volume to store content for that + origin server as per :file:`hosting.config`. +5. Run the command :option:`traffic_line -x` to apply the configuration + changes. 
+ +Configuring the Cache Object Size Limit +======================================= + +By default, Traffic Server allows objects of any size to be cached. You +can change the default behavior and specify a size limit for objects in +the cache via the steps below: + +1. Set :ts:cv:`proxy.config.cache.max_doc_size` + to specify the maximum size allowed for objects in the cache in + bytes. ``0`` (zero) if you do not want a size limit. +2. Run the command :option:`traffic_line -x` to apply the configuration + changes. + +.. _clearing-the-cache: + +Clearing the Cache +================== + +When you clear the cache, you remove all data from the entire cache - +including data in the host database. You should clear the cache before +performing certain cache configuration tasks, such as partitioning. You +cannot clear the cache when Traffic Server is running. + +To clear the cache: + +1. Stop Traffic Server (refer to :ref:`Stopping Traffic Server `) +2. Enter the following command to clear the cache: :: + + traffic_server -Cclear + + The ``clear`` command deletes all data in the object store and the + host database. Traffic Server does not prompt you to confirm the + deletion. + +3. Restart Traffic Server (refer to :ref:`Starting Traffic Server `). + +Removing an Object From the Cache +================================= + +Traffic Server accepts the custom HTTP request method ``PURGE`` when +removing a specific object from cache. If the object is found in the +cache and is successfully removed, then Traffic Server responds with a +``200 OK`` HTTP message; otherwise, a ``404 File Not Found`` message is +returned. + +In the following example, Traffic Server is running on the domain +``example.com`` and you want to remove the image ``remove_me.jpg`` +from cache. 
Because by default we do not permit ``PURGE`` requests from +any other IP, we connect to the daemon via localhost: :: + + $ curl -X PURGE -H 'Host: example.com' -v "http://localhost/remove_me.jpg" + * About to connect() to localhost port 80 (#0) + * Trying 127.0.0.1... connected + * Connected to localhost (127.0.0.1) port 80 (#0) + + > PURGE /remove_me.jpg HTTP/1.1 + > User-Agent: curl/7.19.7 + > Host: example.com + > Accept: */* + > + < HTTP/1.1 200 Ok + < Date: Thu, 08 Jan 2010 20:32:07 GMT + < Connection: keep-alive + +The next time Traffic Server receives a request for the removed object, +it will contact the origin server to retrieve it (i.e., it has been +purged from the Traffic Server cache). + +Note: The procedure above only removes an object from a *specific* +Traffic Server cache. Users may still see the old (removed) content if +it was cached by intermediary caches or by the end-users' web browser. + +.. _inspecting-the-cache: + +Inspecting the Cache +==================== + +Traffic Server provides a Cache Inspector utility that enables you to +view, delete, and invalidate URLs in the cache (HTTP only). The Cache +Inspector utility is a powerful tool that's capable of deleting *all* +the objects in your cache; therefore, make sure that only authorized +administrators are allowed to access this utility, see :ref:`controlling-client-access-to-cache` and the ``@src_ip`` option in :file:`remap.config`. + +Accessing the Cache Inspector Utility +------------------------------------- + +To access the Cache Inspector utility, follow the steps below: + +#. Set :ts:cv:`proxy.config.http_ui_enabled` to ``1``. +#. To access the cache inspector in reverse proxy mode, you must add a + remap rule to :file:`remap.config` to expose the URL. This should be + restricted to a limited set of hosts using the ``@src_ip`` option. 
+ To restrict access to the network 172.28.56.0/24, use :: + + map http://yourhost.com/myCI/ http://{cache} @action=allow @src_ip=172.28.56.1-172.28.56.254 + +#. From the Traffic Server ``bin`` directory, enter the following + command to re-read the configuration file: ``traffic_line -x`` +#. Open your web browser and configure it to use your Traffic Server as + a proxy server. Type the following URL:: + + http://yourhost/myCI/ + +#. The Cache page opens. + +Using the Cache Page +-------------------- + +The **Cache page** provides several options that enable you to view and +delete the contents of your cache: + +- Click **Lookup url** to search for a particular URL in the cache. + When Traffic Server finds the URL in the cache, it displays details + about the object that corresponds to the URL (such as the header + length and the number of alternates). From the display page, you can + delete the URL from the cache. +- Click **Delete url** to delete a particular URL or list of URLs from + the cache. Traffic Server indicates if a delete is successful. +- Click **Regex lookup** to search for URLs that match one or more + regular expressions. From the display page, you can delete the URLs + listed. For example, enter the following to search for all URLs that + end in html and are prefixed with ``http://www.dianes.com``: + ``http://www.dianes.com/.*\.html$`` +- Click **Regex delete** to delete all URLs that match a specified + regular expression. For example, enter the following to delete all + HTTP URLs that end in ``html``: ``http://.*\.html$`` +- Click **Regex invalidate** to invalidate URLs that match a specified + regular expression. When you invalidate a URL, Traffic Server marks + the object that corresponds to the URL as stale in the cache. Traffic + Server then contacts the origin server to check if the object is + still fresh (revalidates) before serving it from the cache. + +.. 
note:: + + Only one administrator should delete and invalidate cache + entries from the Cache page at any point in time. Changes made by + multiple administrators at the same time can lead to unpredictable + results. diff --git a/doc/admin/configuring-traffic-server.en.rst b/doc/admin/configuring-traffic-server.en.rst new file mode 100644 index 00000000..a06eaea6 --- /dev/null +++ b/doc/admin/configuring-traffic-server.en.rst @@ -0,0 +1,83 @@ + +.. _configuring-traffic-server: + +Configuring Traffic Server +************************** + +.. Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, + software distributed under the License is distributed on an + "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied. See the License for the + specific language governing permissions and limitations + under the License. + +Traffic Server provides several options for configuring the system. + +.. toctree:: + :maxdepth: 2 + +.. _configure-using-traffic-line: + +Configure Traffic Server Using Traffic Line +=========================================== + +Traffic Line enables you to quickly and easily change your Traffic +Server configuration via command-line interface. Alternatively, you can +also use :program:`traffic_shell` to configure Traffic Server. 
+ +View Configuration Options in Traffic Line +------------------------------------------ + +To view a configuration setting, enter the following command:: + + traffic_line -r var + +where ``var`` is the variable associated with the configuration +option. For a list of variables, refer to :ref:`configuration-variables`. + +Change Configuration Options in Traffic Line +-------------------------------------------- + +To change the value of a configuration setting, enter the following +command:: + + traffic_line -s var -v value + +where ``var`` is the variable associated with the configuration option +and ``value`` is the value you want to use. For a list of the +variables, see :ref:`configuration-variables`. + +Configure Traffic Server Using Configuration Files +================================================== + +As an alternative to using Traffic Line or Traffic Shell, you can change +Traffic Server configuration options by manually editing specific +variables in the :file:`records.config` file. +After modifying the :file:`records.config` file, +Traffic Server must reread the configuration files: enter the Traffic +Line command :option:`traffic_line -x`. You may need to restart Traffic Server +to apply some of the configuration changes. + +The following is a sample portion of the +:file:`records.config` file: + +.. figure:: ../static/images/admin/records.jpg + :align: center + :alt: Sample records.config file + + Sample records.config file + +In addition to the :file:`records.config` file, +Traffic Server provides other configuration files that are used to +configure specific features. You can manually edit all configuration +files as described in :ref:`configuration-file-reference`. diff --git a/doc/admin/event-logging-formats.en.rst b/doc/admin/event-logging-formats.en.rst new file mode 100644 index 00000000..751ec35e --- /dev/null +++ b/doc/admin/event-logging-formats.en.rst @@ -0,0 +1,413 @@ +.. 
_event-logging-formats: + +Event Logging Formats +********************* + +.. Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, + software distributed under the License is distributed on an + "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied. See the License for the + specific language governing permissions and limitations + under the License. + +This document provides a reference for all the different logging formats +Traffic Server supports. +Rather than just reading about those formats, you may also want to try our +`online event log builder `_ for an interactive way of +building and understanding log formats. + + +.. toctree:: + :maxdepth: 2 + +.. _custom-logging-fields: + +Custom Logging Fields +===================== + +The following list describes Traffic Server custom logging fields. + +``{HTTP header field name}cqh`` + Logs the information in the requested field of the client request + HTTP header. For example, ``%<{Accept-Language}cqh>`` logs the + ``Accept-Language:`` field in client request headers. + +``{HTTP header field name}pqh`` + Logs the information in the requested field of the proxy request + HTTP header. For example, ``%<{Authorization}pqh>`` logs + the ``Authorization:`` field in proxy request headers. + +``{HTTP header field name}psh`` + Logs the information in the requested field of the proxy response + HTTP header. For example, ``%<{Retry-After}psh>`` logs the + ``Retry-After:`` field in proxy response headers. 
+ +``{HTTP header field name}ssh`` + Logs the information in the requested field of the server response + HTTP header. For example, ``%<{Age}ssh>`` logs the ``Age:`` field in + server response headers. + +``caun`` + The client authenticated username; result of the RFC931/ident lookup + of the client username. + +``cfsc`` + The client finish status code; specifies whether the client request + to Traffic Server was successfully completed (``FIN``) or + interrupted (``INTR``). + +``chi`` + The IP address of the client's host machine. + +``chih`` + The IP address of the client's host machine in hexadecimal. + +``chp`` + The port number of the client's host machine. + +``cps`` + Client Protocol Stack, the output would be the conjunction of + protocol names in the stack spliced with '+', such as "TLS+SPDY". + +``cqbl`` + The client request transfer length; the body length in the client + request to Traffic Server (in bytes). + +``cqhl`` + The client request header length; the header length in the client + request to Traffic Server. + +``cqhm`` + The HTTP method in the client request to Traffic Server: ``GET``, + ``POST``, and so on (subset of ``cqtx``). + +``cqhv`` + The client request HTTP version. + +``cqtd`` + The client request timestamp. Specifies the date of the client + request in the format yyyy-mm-dd, where yyyy is the 4-digit year, mm + is the 2-digit month, and dd is the 2-digit day. + +``cqtn`` + The client request timestamp; date and time of the client's request + (in the Netscape timestamp format). + +``cqtq`` + The client request timestamp, with millisecond resolution. + +``cqts`` + The client-request timestamp in Squid format; the time of the client + request since January 1, 1970 UTC. Time is expressed in seconds, + with millisecond resolution. + +``cqtt`` + The client request timestamp. 
The time of the client request in the + format hh:mm:ss, where hh is the two-digit hour in 24-hour format, + mm is the two-digit minutes value, and ss is the 2-digit seconds + value (for example, 16:01:19). + +``cqtx`` + The full HTTP client request text, minus headers; for example, :: + + GET http://www.company.com HTTP/1.0 + + In reverse proxy mode, Traffic Server logs the rewritten/mapped URL + (according to the rules in the + :file:`remap.config` file), _not_ the pristine/unmapped URL. + +``cqu`` + The universal resource identifier (URI) of the request from client + to Traffic Server (subset of ``cqtx`` ). + + In reverse proxy mode, Traffic Server logs the rewritten/mapped URL + (according to the rules in the + :file:`remap.config` file), + _not_ the pristine/unmapped URL. + +``cquc`` + The client request canonical URL. This differs from ``cqu`` in that + blanks (and other characters that might not be parsed by log + analysis tools) are replaced by escape sequences. The escape + sequence is a percentage sign followed by the ASCII code number in + hex. + + See `cquuc`_. + +``cqup`` + The client request URL path; specifies the argument portion of the + URL (everything after the host). For example, if the URL is + ``http://www.company.com/images/x.gif``, then this field displays + ``/images/x.gif`` + + See `cquup`_. + +``cqus`` + The client request URL scheme. + +.. _cquuc: + +``cquuc`` + The client request unmapped URL canonical. This field records a URL + before it is remapped (reverse proxy mode). + +.. _cquup: + +``cquup`` + The client request unmapped URL path. This field records a URL path + before it is remapped (reverse proxy mode). + +.. _cquuh: + +``cquuh`` + The client request unmapped URL host. This field records a URL's + host before it is remapped (reverse proxy mode). + +``crat`` + The Retry-After time in seconds, if specified by the origin server. 
+ +``crc`` + The cache result code; specifies how the cache responded to the + request (``HIT``, ``MISS``, and so on). + +``csscl`` + The cached response length (in bytes) from origin server to Traffic + Server. + +``csshl`` + The cached header length in the origin server response to Traffic + Server (in bytes). + +``csshv`` + The cached server response HTTP version (1.0, 1.1, etc.). + +``csssc`` + The cached HTTP response status code from origin server to Traffic + Server. + +``cwr`` + The cache write result (``-``, ``WL_MISS``, ``INTR```, ``ERR`` or ``FIN``) + +``cwtr`` + The cache write transform result + +``fsiz`` + The size of the file (*n* bytes) as seen by the origin server. + +``pfsc`` + The proxy finish status code; specifies whether the Traffic Server + request to the origin server was successfully completed (``FIN``), + interrupted (``INTR``) or timed out (``TIMEOUT``). + +``phn`` + The hostname of the Traffic Server that generated the log entry in + collated log files. + +``phi`` + The IP of the Traffic Server that generated the log entry in + collated log files. + +``phr`` + The proxy hierarchy route; the route Traffic Server used to retrieve + the object. + +``pqbl`` + The proxy request transfer length; the body length in Traffic + Server's request to the origin server. + +``pqhl`` + The proxy request header length; the header length in Traffic + Server's request to the origin server. + +``pqsi`` + The proxy request server IP address (0 on cache hits and parent-ip + for requests to parent proxies). + +``pqsn`` + The proxy request server name; the name of the server that fulfilled + the request. + +``pscl`` + The length of the Traffic Server response to the client (in bytes). + +``psct`` + The content type of the document from server response header: (for + example, ``img/gif`` ). + +``pshl`` + The header length in Traffic Server's response to the client. 
+ +``psql`` + The proxy response transfer length in Squid format (includes header + and content length). + +``pssc`` + The HTTP response status code from Traffic Server to the client. + +``shi`` + The IP address resolved from the DNS name lookup of the host in the + request. For hosts with multiple IP addresses, this field records + the IP address resolved from that particular DNS lookup. + + This can be misleading for cached documents. For example: if the + first request was a cache miss and came from **``IP1``** for server + **``S``** and the second request for server **``S``** resolved to + **``IP2``** but came from the cache, then the log entry for the + second request will show **``IP2``**. + +``shn`` + The hostname of the origin server. + +``sscl`` + The response length (in bytes) from origin server to Traffic Server. + +``sshl`` + The header length in the origin server response to Traffic Server + (in bytes). + +``sshv`` + The server response HTTP version (1.0, 1.1, etc.). + +``sssc`` + The HTTP response status code from origin server to Traffic Server. + +``ttms`` + The time Traffic Server spends processing the client request; the + number of milliseconds between the time the client establishes the + connection with Traffic Server and the time Traffic Server sends the + last byte of the response back to the client. + +``ttmsh`` + Same as ``ttms`` but in hexadecimal. + +``ttmsf`` + The time Traffic Server spends processing the client request as a + fractional number of seconds. Time is specified in millisecond + resolution; however, instead of formatting the output as an integer + (as with ``ttms``), the display is formatted as a floating-point + number representing a fractional number of seconds. + + For example: if the time is 1500 milliseconds, then this field + displays 1.5 while the ``ttms`` field displays 1500 and the ``tts`` + field displays 1. 
+ +``tts`` + The time Traffic Server spends processing the client request; the + number of seconds between the time at which the client establishes + the connection with Traffic Server and the time at which Traffic + Server sends the last byte of the response back to the client. + + +.. _logging-format-cross-reference: + +Logging Format Cross-Reference +============================== + +The following sections illustrate the correspondence between Traffic +Server logging fields and standard logging fields for the Squid and +Netscape formats. + +Squid Logging Formats +--------------------- + +The following is a list of the Squid logging fields and the +corresponding logging field symbols. + +================== ============= +Squid Field Symbols +================== ============= +``time`` ``cqts`` +``elapsed`` ``ttms`` +``client`` ``chi`` +``action/code`` ``crc/pssc`` +``size`` ``psql`` +``method`` ``cqhm`` +``url`` ``cquc`` +``ident`` ``caun`` +``hierarchy/from`` ``phr/pqsn`` +``content`` ``psct`` +================== ============= + +Netscape Common Logging Formats +------------------------------- + +The following is a list of the Netscape Common logging fields and the +corresponding Traffic Server logging field symbols. + +=============== ============= +Netscape Common Field Symbols +=============== ============= +``host`` ``chi`` +``usr`` ``caun`` +``[time]`` ``[cqtn]`` +``"req"`` ``"cqtx"`` +``s1`` ``pssc`` +``c1`` ``pscl`` +=============== ============= + +Netscape Extended Logging Formats +--------------------------------- + +The following table lists the Netscape Extended logging fields and the +corresponding Traffic Server logging field symbols. 
+ +================= ============= +Netscape Extended Field Symbols +================= ============= +``host`` ``chi`` +``usr`` ``caun`` +``[time]`` ``[cqtn]`` +``"req"`` ``"cqtx"`` +``s1`` ``pssc`` +``c1`` ``pscl`` +``s2`` ``sssc`` +``c2`` ``sscl`` +``b1`` ``cqbl`` +``b2`` ``pqbl`` +``h1`` ``cqhl`` +``h2`` ``pshl`` +``h3`` ``pqhl`` +``h4`` ``sshl`` +``xt`` ``tts`` +================= ============= + +Netscape Extended-2 Logging Formats +----------------------------------- + +The following is a list of the Netscape Extended-2 logging fields and +the corresponding Traffic Server logging field symbols. + +=================== ============= +Netscape Extended-2 Field Symbols +=================== ============= +``host`` ``chi`` +``usr`` ``caun`` +``[time]`` ``[cqtn]`` +``"req"`` ``"cqtx"`` +``s1`` ``pssc`` +``c1`` ``pscl`` +``s2`` ``sssc`` +``c2`` ``sscl`` +``b1`` ``cqbl`` +``b2`` ``pqbl`` +``h1`` ``cqhl`` +``h2`` ``pshl`` +``h3`` ``pqhl`` +``h4`` ``sshl`` +``xt`` ``tts`` +``route`` ``phr`` +``pfs`` ``cfsc`` +``ss`` ``pfsc`` +``crc`` ``crc`` +=================== ============= + diff --git a/doc/admin/explicit-proxy-caching.en.rst b/doc/admin/explicit-proxy-caching.en.rst new file mode 100644 index 00000000..60341b40 --- /dev/null +++ b/doc/admin/explicit-proxy-caching.en.rst @@ -0,0 +1,94 @@ +.. _explicit-proxy-caching: + +Explicit Proxy Caching +********************** + +.. Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. 
You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, + software distributed under the License is distributed on an + "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied. See the License for the + specific language governing permissions and limitations + under the License. + +.. toctree:: + :maxdepth: 2 + +If you want to use Traffic Server as an explicit proxy cache, you must +configure client software (i.e., browsers) to send requests directly to +Traffic Server. + +If you do not configure Traffic Server to use the transparency option +(with which client requests are intercepted en route to origin servers +by a switch/router and rerouted to the Traffic Server machine), then +clients must configure their web browsers to send HTTP requests to the +Traffic Server proxy cache by configuring their browsers to download +proxy configuration instructions from a :ref:`PAC file ` +(Proxy Auto-Configuration file). + +Configuring Browsers Manually +============================= + +To manually configure a browser to send HTTP requests to Traffic Server, +clients must provide the following information: + +- The fully-qualified hostname or IP address of the Traffic Server node +- The Traffic Server proxy server port (port 8080) + +In addition, clients can specify *not* to use Traffic Server for certain +sites - in such cases, requests to the listed sites go directly to the +origin server. The procedures for manual configuration vary among +browser versions; refer to specific browser documentation for complete +proxy configuration instructions. You do not need to set any special +configuration options on Traffic Server if you want to accept requests +from manually-configured browsers. + +.. 
_using-a-pack-file: + +Using a PAC File +================ + +A **PAC file** is a specialized JavaScript function definition that a +browser calls to determine how requests are handled. Clients must +specify (in their browser settings) the URL from which the PAC file is +loaded. You can store a PAC file on Traffic Server (or on any server in +your network) and then provide the URL for this file to your clients. + +If you want to store a PAC file on the Traffic Server system, then you +must perform the following configuration: + +- Either copy an existing PAC file into the Traffic Server ``config`` + directory or enter a script that defines the proxy server + configuration settings in the ``proxy.pac`` file provided. The file + is empty by default. A sample script is provided in `Sample PAC + File`_. +- Specify the port Traffic Server uses to serve the PAC file. The + default port is 8083, see :ts:cv:`proxy.config.admin.autoconf_port`. + +Sample PAC File +--------------- + +The following sample PAC file instructs browsers to connect directly to +all hosts without a fully-qualified domain name and to all hosts in the +local domain. All other requests go to the Traffic Server named +``myproxy.company.com``.:: + + function FindProxyForURL(url, host) + { + if (isPlainHostName(host)) || (localHostOrDomainIs(host, ".company.com")) { + return "DIRECT"; + } + else + return "PROXY myproxy.company.com:8080; DIRECT"; + } + + diff --git a/doc/admin/faqs.en.rst b/doc/admin/faqs.en.rst new file mode 100644 index 00000000..3e1e3366 --- /dev/null +++ b/doc/admin/faqs.en.rst @@ -0,0 +1,392 @@ +.. _admin-faqs: + +FAQ and Troubleshooting Tips +**************************** + +.. Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. 
The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, + software distributed under the License is distributed on an + "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied. See the License for the + specific language governing permissions and limitations + under the License. + +.. toctree:: + :maxdepth: 2 + +FAQs +==== + +How do you create a raw disk for the cache if all your disks have mounted file systems? +--------------------------------------------------------------------------------------- + +Create a large file on filesystem (with :manpage:`dd(1)`) and mount it as loopback device. +This is accomplished with :manpage:`losetup(8)` on Linux, :manpage:`lofiadm(1m)` on Solaris +and Illumos, and :manpage:`mdconfig(8)` on FreeBSD. + +How do disk I/O errors affect the cache and what does Traffic Server do when a cache disk fails? +------------------------------------------------------------------------------------------------ + +If a disk drive fails five successive I/O operations, then Traffic +Server considers the drive inaccessible and removes the entire disk from +the cache. Normal cache operations continue for all other Traffic Server +disk drives. + +If a client disconnects during the time that Traffic Server is downloading a large object, is any of the object saved in the cache? +----------------------------------------------------------------------------------------------------------------------------------- + +When a client disconnects during an HTTP operation, Traffic Server +continues to download the object from the origin server for up to 10 +seconds. 
If the transfer from the origin server completes successfully +within 10 seconds after the client disconnect, then Traffic Server +stores the object in cache. If the origin server download does *not* +complete successfully within 10 seconds, then Traffic Server disconnects +from the origin server and deletes the object from cache. Traffic Server +does not store partial documents in the cache. + +Can Traffic Server cache Java applets, JavaScript programs, or other application files like VBScript? +----------------------------------------------------------------------------------------------------- + +Yes, Traffic Server can store and serve Java applets, JavaScript +programs, VBScripts, and other executable objects from its cache +according to the freshness and cacheability rules for HTTP objects. +Traffic Server does not execute the applets, scripts, or programs, +however - these objects run only when the client system (ie, the one +that sent the request) loads them. + +In Squid- and Netscape-format log files, what do the cache result codes mean? +----------------------------------------------------------------------------- + +This is described in detail in the :ref:`log-formats-squid-format` documentation. + +What is recorded by the ``cqtx`` field in a custom log file? +------------------------------------------------------------ + +- In **forward proxy mode**, the cqtx field records the complete client + request in the log file (for example, ``GET http://www.company.com HTTP/1.0``). +- In **reverse proxy mode**, the cqtx field records the hostname or IP + address of the origin server because Traffic Server first remaps the + request as per map rules in the :file:`remap.config` file. + +Does Traffic Server refresh entries in its host database after a certain period of time if they have not been used? 
+------------------------------------------------------------------------------------------------------------------- + +By default, the Traffic Server host database observes the time-to-live +(``ttl``) values set by name servers. You can reconfigure Traffic Server +to ignore the ``ttl`` set by name servers and use a specific Traffic +Server setting instead. Alternatively, you can configure Traffic Server +to compare the ``ttl`` value set by the name server with the ``ttl`` +value set by Traffic Server, and then use either the lower or the higher +value. + +see :ts:cv:`proxy.config.hostdb.ttl_mode` for more info + +Can you improve the look of your custom response pages by using images, animated .gifs, and Java applets? +--------------------------------------------------------------------------------------------------------- + +No, because Traffic Server can only respond to clients with a single +text or HTML document. As a workaround, however, you can provide +references on your custom response pages to images, animated .gifs, Java +applets, or objects other than text which are located on a web server. +Add links in the body_factory template files in the same way you would +for any image in an HTML document (i.e., with the full URL in the +``SRC`` attribute). + +Can Traffic Server run in forward proxy and reverse proxy modes at the same time? +--------------------------------------------------------------------------------- + +Yes. When you enable reverse proxy mode, Traffic Server remaps incoming +requests according to the map rules in the :file:`remap.config` file. All +other requests that do not match a map rule are simply served in forward +proxy mode. + +If you want to run in reverse proxy only mode (wherein Traffic Server +does *not* serve requests that fail to match a map rule), then you must +set the configuration variable :ts:cv:`proxy.config.url_remap.remap_required` +to ``1`` in the :file:`records.config` file. 
How do I enable forward proxy mode? +-----------------------------------
+ +You are unable to execute Traffic Line commands +----------------------------------------------- + +Traffic Line commands do not execute under the following conditions: + +- **When the traffic_manager process is not running** Check to see + if the :program:`traffic_manager` process is running by entering the + following command: ``pgrep -l traffic_manager`` + + If the :program:`traffic_manager` process is not running, then enter the + following command from the Traffic Server ``bin`` directory to start it: + ``./traffic_manager`` + + .. this is wrong + + You should always start and stop Traffic Server with the + :program:`trafficserver start`` and :program:`trafficserver stop` commands to ensure + that all the processes start and stop correctly. For more information, + refer to :ref:`getting-started`. + +- **When you are not executing the command from $TSHome/bin** If the Traffic Server + ``bin`` directory is not in your path, then prepend the Traffic Line + commands with ``./`` (for example, ``./traffic_line -h``). + +- **When multiple Traffic Server installations are present and you are not + executing the Traffic Line command from the active Traffic Server path + specified in ``/etc/trafficserver``** + + +You observe inconsistent behavior when one node obtains an object from another node in the cluster +-------------------------------------------------------------------------------------------------- + +As part of the initial system preparation, you must synchronize the +clocks on all nodes in your cluster. Minor time differences do not cause +problems, but differences of more than a few minutes can affect Traffic +Server operation. + +You should run a clock synchronization daemon such as xntpd. 
To obtain +the latest version of xntpd, go to ``http://www.eecis.udel.edu/~ntp/`` + +Web browsers display an error document with a 'data missing' message +-------------------------------------------------------------------- + +A message similar to the following might display in web browsers: :: + + Data Missing + + This document resulted from a POST operation and has expired from the cache. You can repost the form data to recreate the document by pressing the Reload button. + +This is a Web browser issue and not a problem specific to (or caused by) +Traffic Server. Because Web browsers maintain a separate local cache in +memory and/or disk on the client system, messages about documents that +have expired from cache refer to the browser's local cache and *not* +to the Traffic Server cache. There is no Traffic Server message or +condition that can cause such messages to appear in a web browser. + +Traffic Server does not resolve any websites +-------------------------------------------- + +The browser indicates that it is contacting the host and then times out +with the following message: :: + + The document contains no data; Try again later, or contact the server's Administrator... + +Make sure the system is configured correctly and that Traffic Server can +read the name resolution file: + +- Check if the server can resolve DNS lookups by issuing the nslookup + command (for example, ``nslookup www.myhost.com``). +- Check if the :manpage:`resolv.conf(5)` file contains valid IP addresses + for your DNS servers. +- On some systems, if the :manpage:`resolv.conf(5)` file is unreadable or + has no name server entry, then the operating system uses + ``localhost`` as a name server. Traffic Server, however, does not use + this convention. If you want to use ``localhost`` as a name server, + then you must add a name server entry for ``127.0.0.1`` or + ``0.0.0.0`` in the :manpage:`resolv.conf(5)` file. 
+- Check that the Traffic Server user account has permission to read the + /etc/resolv.conf file. If it does not, then change the file + permissions to ``rw-r--r--`` (``644``) + +'Maximum document size exceeded' message in the system log file +--------------------------------------------------------------- + +The following message appears in the system log file: :: + + WARNING: Maximum document size exceeded + +A requested object was larger than the maximum size allowed in the +Traffic Server cache, so Traffic Server provided proxy service for the +oversized object but did not cache it. To set the object size limit for +the cache, modify the :ts:cv:`proxy.config.cache.max_doc_size` +variable in the records.config file. If you do not want to limit the +size of objects in the cache, then set the document size +to ``0`` (zero). + +'DrainIncomingChannel' message in the system log file +----------------------------------------------------- + +The following messages may appear in the system log file: :: + + Feb 20 23:53:40 louis traffic_manager[4414]: ERROR ==> [drainIncomingChannel] Unknown message: 'GET http://www.telechamada.pt/ HTTP/1.0' + Feb 20 23:53:46 louis last message repeated 1 time + Feb 20 23:53:58 louis traffic_manager[4414]: ERROR ==> [drainIncomingChannel] Unknown message: 'GET http://www.ip.pt/ HTTP/1.0' + +These error messages indicate that a browser is sending HTTP requests to +one of the Traffic Server cluster ports - either ``rsport`` (default +port 8088) or ``mcport`` (default port 8089). Traffic Server discards +the request; this error does not cause any Traffic Server problems. The +misconfigured browser must be reconfigured to use the correct proxy +port. Traffic Server clusters work best when configured to use a +separate network interface and cluster on a private subnet, so that +client machines have no access to the cluster ports. 
+ +'No cop file' message in the system log file +-------------------------------------------- + +The following message appears repeatedly in the system log file: :: + + traffic_cop[16056]: encountered "var/trafficserver/no_cop" file...exiting + +The file ``var/trafficserver/no_cop`` acts as an administrative control +that instructs the :program:`traffic_cop` process to exit immediately without +starting :program:`traffic_manager` or performing any health checks. The +``no_cop`` file prevents Traffic Server from starting automatically when +it has been stopped with the :option:`trafficserver stop` command. Without +this static control, Traffic Server would restart automatically upon +system reboot. The ``no_cop`` control keeps Traffic Server off until it +is explicitly restarted with the :: + + trafficserver start + +command. + + +Warning in the system log file when manually editing vaddrs.config +------------------------------------------------------------------ + +If you manually edit the vaddrs.config file as a non-root user, then +Traffic Server issues a warning message in the system log file similar +to the following:: + + WARNING: interface is ignored: Operation not permitted + +You can safely ignore this message; Traffic Server *does* apply your +configuration edits. + +Traffic Server is running but no log files are created +------------------------------------------------------ + +Traffic Server only writes event log files when there is information to +record. If Traffic Server is idle, then it's possible/probable that no +log files exist. In addition: + +Make sure you're looking in the correct directory. By default, Traffic +Server creates log files in the logs directory. Check the location of +log files by checking the value of the variable +proxy.config.log.logfile_dir in the records.config file. Check that the +log directory has read/write permissions for the Traffic Server user +account. 
If the log directory does not have the correct permissions, +then the traffic_server process is unable to open or create log files. +Check that logging is enabled by checking the value of the +proxy.config.log.logging_enabled variable in the records.config file. +Check that a log format is enabled. In the records.config file, select +the standard or custom format by editing variables in the Logging Config +section. + +Traffic Server shows an error indicating too many network connections +--------------------------------------------------------------------- + +By default, Traffic Server supports 8000 network connections: half of +this number is allocated for client connections and the remaining half +is for origin server connections. A **connection throttle event** occurs +when client or origin server connections reach 90% of half the +configured limit (3600 by default). When a connection throttle event +occurs, Traffic Server continues processing all existing connections but +will not accept new client connection requests until the connection +count falls below the limit. + +Connection throttle events can occur under the following conditions: + +- If there is a **connection spike** (e.g., if thousands of client + requests all reach Traffic Server at the same time). Such events are + typically transient and require no corrective action. +- If there is a **service overload** (e.g., if client requests + continuously arrive faster than Traffic Server can service them). + Service overloads often indicate network problems between Traffic + Server and origin servers. Conversely, it may indicate that Traffic + Server needs more memory, CPU, cache disks, or other resources to + handle the client load. + +If necessary, you can reset the maximum number of connections supported +by Traffic Server by editing the value of the +:ts:cv:`proxy.config.net.connections_throttle` configuration variable in +the records.config file. 
Do not increase the connection throttle limit +unless the system has adequate memory to handle the client connections +required. A system with limited RAM might need a throttle limit lower +than the default value. Do not set this variable below the minimum value +of 100. + +Low memory symptoms +------------------- + +Under heavy load, the Linux kernel can run out of RAM. This low memory +condition can cause slow performance and a variety of other system +problems. In fact, RAM exhaustion can occur even if the system has +plenty of free swap space. + +Symptoms of extreme memory exhaustion include the following messages in +the system log files (``/var/log/messages``):: + + WARNING: errno 105 is ENOBUFS (low on kernel memory), consider a memory upgrade + + kernel: eth0: can't fill rx buffer (force 0)! + + kernel: recvmsg bug: copied E01BA916 seq E01BAB22 + +To avoid memory exhaustion, add more RAM to the system or reduce the +load on Traffic Server. + +Connection timeouts with the origin server +------------------------------------------ + +Certain origin servers take longer than 30 seconds to post HTTP +requests, which results in connection timeouts with Traffic Server. To +prevent such connection timeouts, you must change the value of the +configuration variable proxy.config.http.connect_attempts_timeout in +the records.config file to 60 seconds or more. + diff --git a/doc/admin/forward-proxy.en.rst b/doc/admin/forward-proxy.en.rst new file mode 100644 index 00000000..03c3d774 --- /dev/null +++ b/doc/admin/forward-proxy.en.rst @@ -0,0 +1,80 @@ +.. _forward-proxy: + +Forward Proxy +************* + +.. Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. 
You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, + software distributed under the License is distributed on an + "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied. See the License for the + specific language governing permissions and limitations + under the License. + +The Apache Traffic Server is a general purpose *proxy*. As such it can +also be used as forward proxy. + +A forward proxy can be used as a central tool in your infrastructure +to access the web. In combination with a cache that means overall +reduced bandwidth usage. + +If your forward proxy is not also configured as :ref:`transparent-proxy` +your clients will have to be configured to actually use it. + +The main difference between a forward and a transparent proxy is that +User Agents *know* that they are accessing a proxy, thus forming their +requests like so: :: + + GET http://example.com/index.php?id=1337 HTTP/1.1 + +This request, then, is translated by the proxy to:: + + GET /index.php?id=1337 HTTP/1.1 + Host: example.com + +Apache Traffic Server offers two ways to User Agents: They can either be +pointed directly to the default ``8080`` port. Alternatively, they can +be pointed to the more dynamic :ts:cv:`proxy.config.url_remap.default_to_server_pac` + +This port will then serve a JavaScript like configuration that User +Agents can use to determine where to send their requests to. 
+ +Configuration +============= + +In order to configure Apache Traffic Server as forward proxy you will +have to edit :file:`records.config` and set + +- :ts:cv:`proxy.config.url_remap.remap_required` to ``0`` + +If your proxy is serving as *pure* forward proxy, you will also want to +set + +- :ts:cv:`proxy.config.reverse_proxy.enabled` to ``0`` + +Other configuration variables to consider: + +- :ts:cv:`proxy.config.http.no_dns_just_forward_to_parent` +- :ts:cv:`proxy.config.http.forward.proxy_auth_to_parent` +- :ts:cv:`proxy.config.http.insert_squid_x_forwarded_for` + +Security Considerations +======================= + +It's important to note that once your Apache Traffic Server is +configured as forward proxy it will indiscriminately accept proxy +requests from anyone. That means, if it's reachable on the internet, you +have configured an *Open Proxy*. Most of the time, this is *not* what +you want, so you'll have to make sure it's either only reachable within +your NAT or is secured by firewall rules that permit access only to +those clients which you want to be able to use it. + diff --git a/doc/admin/getting-started.en.rst b/doc/admin/getting-started.en.rst new file mode 100644 index 00000000..bdb68d87 --- /dev/null +++ b/doc/admin/getting-started.en.rst @@ -0,0 +1,189 @@ + +.. _getting-started: + +Getting Started +*************** + +.. Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. 
You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, + software distributed under the License is distributed on an + "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied. See the License for the + specific language governing permissions and limitations + under the License. + + +.. toctree:: + :maxdepth: 2 + +Before you start +================ + +Before you get started with Traffic Server you may have to decide which +version you want to use. Traffic Server follows the `Semantic Versioning +`_ guidelines, in summary + +A version is made of a version-triplet: ``MAJOR.MINOR.PATCH`` + +As of v4.0.0, there are no longer any development (or unstable) releases. +All releases are considered stable and ready for production use, releases +within a major version are always upgrade compatible. More details are +available on the `Wiki page +`_. + +Sometimes we speak of trunk, master or HEAD, all of which are used +interchangeably: trunk or master or sometimes TIP or HEAD, refer to the +latest code in a Git Version Control System. Master is always kept releasable, +and compatible with the current major release version. Incompatible changes +are sometimes committed on a next-major release branch, for example we have +the ``5.0.x`` branch where changes incompatible with 4.x are managed. + +If your distribution does not come with a prepackaged Traffic Server, +please go to `downloads `_ to choose the version that you +consider most appropriate for yourself. If you want to really be on the +bleeding edge you can clone our `git +repository `_. + +Please note that while we do have a `GitHub +Mirror `_ that you can also use +to submit pull requests, it may not be entirely up-to-date. 
+ +Building Traffic Server +======================= + +In order to build Traffic Server from source you will need the following +(development) packages: + +- pkgconfig +- libtool +- gcc (>= 4.3 or clang > 3.0) +- make (GNU Make!) +- openssl +- tcl +- expat +- pcre +- libcap +- flex (for TPROXY) +- hwloc +- lua +- curses +- curl (both for :program:`traffic_top`) + +if you're building from a git clone, you'll also need + +- git +- autoconf +- automake + +We will show-case a build from git:: + + git clone https://git-wip-us.apache.org/repos/asf/trafficserver.git + +Next, we ``cd trafficserver`` and run:: + + autoreconf -if + +This will generate a ``configure`` file from ``configure.ac``, so now we +can run that:: + + ./configure --prefix=/opt/ats + +Note well, that by default Traffic Server uses the user ``nobody``, as +well as user's primary group as Traffic Server user. If you want to +change that, you can override it here:: + + ./configure --prefix=/opt/ats --with-user=tserver + +If dependencies are not in standard paths (``/usr/local`` or ``/usr``), +you need to pass options to ``configure`` to account for that:: + + ./configure --prefix=/opt/ats --with-user=tserver --with-lua=/opt/csw + +Most ``configure`` path-options accept a format of +``"INCLUDE_PATH:LIBRARY_PATH"``:: + + ./configure --prefix=/opt/ats --with-user=tserver --with-lua=/opt/csw \ + --with-pcre=/opt/csw/include:/opt/csw/lib/amd64 + +We can run ``make`` to build the project. We highly recommend to run +``make check`` to verify the build's general sanity:: + + make + make check + +We can finally run ``make install`` to install (you may have to switch +to root to do this):: + + sudo make install + +We also recommend to run a regression test. Please note that this will +only work successfully with the default ``layout``:: + + cd /opt/ats + sudo bin/traffic_server -R 1 + +After you have installed Traffic Server on your system, you can do any +of the following: + +.. 
_start-traffic-server: + +Start Traffic Server +==================== + +To start Traffic Server manually, issue the ``trafficserver`` command, +passing in the attribute ``start``. This command starts all the +processes that work together to process Traffic Server requests as well +as manage, control, and monitor the health of the Traffic Server system. + +To run the ``trafficserver start`` command, e.g.:: + + bin/trafficserver start + +At this point your server is up and running in the default configuration +of a :ref:`reverse-proxy-and-http-redirects`. + +.. _start-straffic-line: + +Start Traffic Line +================== + +Traffic Line provides a quick way of viewing Traffic Server statistics +and configuring the Traffic Server system via command-line interface. To +execute individual commands or script multiple commands, refer to +:program:`traffic_line`. + +Traffic Line commands take the following form:: + + bin/traffic_line -command argument + +For a list of :program:`traffic_line` commands, enter:: + + bin/traffic_line -h + +Please note that :program:`traffic_line`, while a fine tool for an +administrator, is a poor choice for automation, especially that of +monitoring. See our chapter on :ref:`monitoring-traffic` +for how to do that better. + +.. _stop-traffic-server: + +Stop Traffic Server +=================== + +To stop Traffic Server, always use the :program:`trafficserver` command, +passing in the attribute ``stop``. This command stops all the Traffic +Server processes (:program:`traffic_manager`, :program:`traffic_server`, and +:program:`traffic_cop`). Do not manually stop processes, as this can lead to +unpredictable results.:: + + bin/trafficserver stop + diff --git a/doc/admin/hierachical-caching.en.rst b/doc/admin/hierachical-caching.en.rst new file mode 100644 index 00000000..66a1a5e8 --- /dev/null +++ b/doc/admin/hierachical-caching.en.rst @@ -0,0 +1,180 @@ +.. _hierarchical-caching: + +Hierarchical Caching +******************** + +.. 
Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, + software distributed under the License is distributed on an + "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied. See the License for the + specific language governing permissions and limitations + under the License. + + +.. toctree:: + :maxdepth: 2 + +Understanding Cache Hierarchies +=============================== + +A cache hierarchy consists of cache levels that communicate with each +other. Traffic Server supports several types of cache hierarchies. All +cache hierarchies recognize the concept of **parent** and **child**. A +parent cache is a cache higher up in the hierarchy, to which Traffic +Server can forward requests. A child cache is a cache for which Traffic +Server is a parent. + +Traffic Server supports the following hierarchical caching options: + +Parent Caching +============== + +If a Traffic Server node cannot find a requested object in its cache, +then it searches a parent cache (which itself can search other caches) +before finally retrieving the object from the origin server. You can +configure a Traffic Server node to use one or more parent caches so that +if one parent is unavailable, then another parent is available to service +requests. This is called `Parent Failover`_. Traffic +Server will support parent caching for HTTP and HTTPS requests. 
+ +**Note:** If you do not want all requests to go to the parent cache, +then simply configure Traffic Server to route certain requests (such as +requests containing specific URLs) directly to the origin server. Simply +set parent proxy rules in :file:`parent.config` + +The figure below illustrates a simple cache hierarchy with a Traffic +Server node configured to use a parent cache. In the following scenario, +a client sends a request to a Traffic Server node that is a child in the +cache hierarchy (because it's configured to forward missed requests to a +parent cache). The request is a cache miss, so Traffic Server then +forwards the request to the parent cache, where it is a cache hit. The +parent sends a copy of the content to the Traffic Server, where it is +cached and then served to the client. Future requests for this content +can now be served directly from the Traffic Server cache (until the data +is stale or expired). + +.. figure:: ../static/images/admin/cachehrc.jpg + :align: center + :alt: Parent caching + + Parent caching + +**Note:** If the request is a cache miss on the parent, then the parent +retrieves the content from the origin server (or from another cache, +depending on the parent’s configuration). The parent caches the content +and then sends a copy to Traffic Server (its child), where it is cached +and served to the client. + +Parent Failover +--------------- + +Traffic Server supports use of several parent caches. This ensures that +if one parent cache is not available, another parent cache can service +client requests. + +When you configure your Traffic Server to use more than one parent +cache, Traffic Server detects when a parent is not available and sends +missed requests to another parent cache. If you specify more than two +parent caches, then the order in which the parent caches are queried +depends upon the parent proxy rules configured in the :file:`parent.config` +configuration file. 
By default, the parent caches are queried in the +order they are listed in the configuration file. + +.. _configuring-traffic-server-to-use-a-parent-cache: + +Configuring Traffic Server to Use a Parent Cache +------------------------------------------------ + +To configure Traffic Server to use one or more parent caches, you must +complete the following steps: + +- Enable the parent caching option. +- Identify the parent cache you want to use to service missed requests. + To use **parent failover**, you must identify more than one parent + cache so that when a parent cache is unavailable, requests are sent + to another parent cache. + +.. note:: You need to configure the child cache only. No additional configuration is needed for the Traffic Server parent cache. + +Configure Traffic Server to use a parent cache by editing the following +variable :ts:cv:`proxy.config.http.parent_proxy_routing_enable` in :file:`records.config` file. + +Edit the :file:`parent.config` file located in the Traffic Server ``config`` directory to set parent +proxy rules to specify the parent cache to which you want missed +requests to be forwarded; + +The following example configures Traffic Server to route all requests +containing the regular expression ``politics`` and the path +``/viewpoint`` directly to the origin server (bypassing any parent +hierarchies): :: + + url_regex=politics prefix=/viewpoint go_direct=true + +The following example configures Traffic Server to direct all missed +requests with URLs beginning with ``http://host1`` to the parent cache +``parent1``. If ``parent1`` cannot serve the requests, then requests are +forwarded to ``parent2``. Because ``round-robin=true``, Traffic Server +goes through the parent cache list in a round-robin based on client IP +address.:: + + dest_host=host1 scheme=http parent="parent1;parent2" round-robin=strict + +Run the command :option:`traffic_line -x` to apply the configuration changes. + +.. XXX As of yet, this is unsupported. + +.. 
# ICP Peering # {#ICPPeering} + +.. The Internet Cache Protocol (ICP) is used by proxy caches to exchange information + about their content. ICP query messages ask other caches if they are storing + a particular URL; ICP response messages reply with a hit or miss answer. A + cache exchanges ICP messages only with specific **ICP peers**, which are neighboring + caches that can receive ICP messages. An ICP peer can be a **sibling cache + **(which is at the same level in the hierarchy) or a **parent cache** (which + is one level up in the hierarchy). + +.. If Traffic Server has ICP caching enabled, then it sends ICP queries to its + ICP peers when the HTTP request is a cache miss. If there are no hits but parents + exist, then a parent is selected using a round-robin policy. If no ICP parents + exist, then Traffic Server forwards the request to its HTTP parents. If there + are no HTTP parent caches established, then Traffic Server forwards the request + to the origin server. + +.. If Traffic Server receives a hit message from an ICP peer, then Traffic Server + sends the HTTP request to that peer. However, it might turn out to be a cache + miss because the original HTTP request contains header information that is + not communicated by the ICP query. For example, the hit might not be the requested + alternate. If an ICP hit turns out to be a miss, then Traffic Server forwards + the request to either its HTTP parent caches or to the origin server. + +.. To configure a Traffic Server node to be part of an ICP cache hierarchy, you + must perform the following tasks: + +.. * Determine if the Traffic Server can receive ICP messages only, or if it can send _and_ receive ICP messages. + * Determine if Traffic Server can send messages directly to each ICP peer or send a single message on a specified multicast channel. + * Specify the port used for ICP messages. + * Set the ICP query timeout. 
+ * Identify the ICP peers (siblings and parents) with which Traffic Server can communicate. + +.. To configure Traffic Server to use an ICP cache hierarchy edit the following variables in :file:`records.config` file: + +.. * :ts:cv:`proxy.config.icp.enabled` + * :ts:cv:`proxy.config.icp.icp_port` + * :ts:cv:`proxy.config.icp.multicast_enabled` + * :ts:cv:`proxy.config.icp.query_timeout` + +.. Edit :file:`icp.config` file located in the Traffic Server `config` directory: + For each ICP peer you want to identify, enter a separate rule in the :file:`icp.config` file. + +.. Run the command :option:`traffic_line -x` to apply the configuration changes. + diff --git a/doc/admin/http-proxy-caching.en.rst b/doc/admin/http-proxy-caching.en.rst new file mode 100644 index 00000000..e305007e --- /dev/null +++ b/doc/admin/http-proxy-caching.en.rst @@ -0,0 +1,846 @@ +.. _http-proxy-caching: + +HTTP Proxy Caching +****************** + +.. Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, + software distributed under the License is distributed on an + "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied. See the License for the + specific language governing permissions and limitations + under the License. + +Web proxy caching enables you to store copies of frequently-accessed web +objects (such as documents, images, and articles) and then serve this +information to users on demand. It improves performance and frees up +Internet bandwidth for other tasks. + +.. 
toctree:: + :maxdepth: 2 + +Understanding HTTP Web Proxy Caching +==================================== + +Internet users direct their requests to web servers all over the +Internet. A caching server must act as a **web proxy server** so it can +serve those requests. After a web proxy server receives requests for web +objects, it either serves the requests or forwards them to the **origin +server** (the web server that contains the original copy of the +requested information). The Traffic Server proxy supports **explicit +proxy caching**, in which the user's client software must be configured +to send requests directly to the Traffic Server proxy. The following +overview illustrates how Traffic Server serves a request. + +1. Traffic Server receives a client request for a web object. + +2. Using the object address, Traffic Server tries to locate the + requested object in its object database (**cache**). + +3. If the object is in the cache, then Traffic Server checks to see if + the object is fresh enough to serve. If it is fresh, then Traffic + Server serves it to the client as a **cache hit** (see the figure + below). + + .. figure:: ../static/images/admin/cache_hit.jpg + :align: center + :alt: A cache hit + + A cache hit + +4. If the data in the cache is stale, then Traffic Server connects to + the origin server and checks if the object is still fresh (a + :term:`revalidation`). If it is, then Traffic Server immediately sends + the cached copy to the client. + +5. If the object is not in the cache (a **cache miss**) or if the server + indicates the cached copy is no longer valid, then Traffic Server + obtains the object from the origin server. The object is then + simultaneously streamed to the client and the Traffic Server local + cache (see the figure below). Subsequent requests for the object can + be served faster because the object is retrieved directly from cache. + + .. 
figure:: ../static/images/admin/cache_miss.jpg + :align: center + :alt: A cache miss + + A cache miss + +Caching is typically more complex than the preceding overview suggests. +In particular, the overview does not discuss how Traffic Server ensures +freshness, serves correct HTTP alternates, and treats requests for +objects that cannot/should not be cached. The following sections discuss +these issues in greater detail. + +Ensuring Cached Object Freshness +================================ + +When Traffic Server receives a request for a web object, it first tries +to locate the requested object in its cache. If the object is in cache, +then Traffic Server checks to see if the object is fresh enough to +serve. For HTTP objects, Traffic Server supports optional +author-specified expiration dates. Traffic Server adheres to these +expiration dates; otherwise, it picks an expiration date based on how +frequently the object is changing and on administrator-chosen freshness +guidelines. Objects can also be revalidated by checking with the origin +server to see if an object is still fresh. + +HTTP Object Freshness +--------------------- + +Traffic Server determines whether an HTTP object in the cache is fresh +by: + +- **Checking the** ``Expires`` **or** ``max-age`` **header** + + Some HTTP objects contain ``Expires`` headers or ``max-age`` headers + that explicitly define how long the object can be cached. Traffic + Server compares the current time with the expiration time to + determine if the object is still fresh. + +- **Checking the** ``Last-Modified`` **/** ``Date`` **header** + + If an HTTP object has no ``Expires`` header or ``max-age`` header, + then Traffic Server can calculate a freshness limit using the + following formula:: + + freshness_limit = ( date - last_modified ) * 0.10 + + where *date* is the date in the object's server response header + and *last_modified* is the date in the ``Last-Modified`` header. 
+ If there is no ``Last-Modified`` header, then Traffic Server uses the + date the object was written to cache. The value ``0.10`` (10 percent) + can be increased or reduced to better suit your needs (refer to + `Modifying Aging Factor for Freshness Computations`_). + + The computed freshness limit is bound by a minimum and maximum value + - refer to `Setting Absolute Freshness Limits`_ for more information. + +- **Checking the absolute freshness limit** + + For HTTP objects that do not have ``Expires`` headers or do not have + both ``Last-Modified`` and ``Date`` headers, Traffic Server uses a + maximum and minimum freshness limit (refer to `Setting Absolute Freshness Limits`_). + +- **Checking revalidate rules in the** :file:`cache.config` **file** + + Revalidate rules apply freshness limits to specific HTTP objects. You + can set freshness limits for objects originating from particular + domains or IP addresses, objects with URLs that contain specified + regular expressions, objects requested by particular clients, and so + on (refer to :file:`cache.config`). + +Modifying Aging Factor for Freshness Computations +------------------------------------------------- + +If an object does not contain any expiration information, then Traffic +Server can estimate its freshness from the ``Last-Modified`` and +``Date`` headers. By default, Traffic Server stores an object for 10% of +the time that elapsed since it last changed. You can increase or reduce +the percentage according to your needs. + +To modify the aging factor for freshness computations + +1. Change the value for :ts:cv:`proxy.config.http.cache.heuristic_lm_factor`. + +2. Run the :option:`traffic_line -x` command to apply the configuration + changes. + +Setting absolute Freshness Limits +--------------------------------- + +Some objects do not have ``Expires`` headers or do not have both +``Last-Modified`` and ``Date`` headers. 
To control how long these +objects are considered fresh in the cache, specify an **absolute +freshness limit**. + +To specify an absolute freshness limit + +1. Edit the variables + + - :ts:cv:`proxy.config.http.cache.heuristic_min_lifetime` + - :ts:cv:`proxy.config.http.cache.heuristic_max_lifetime` + +2. Run the :option:`traffic_line -x` command to apply the configuration + changes. + +Specifying Header Requirements +------------------------------ + +To further ensure freshness of the objects in the cache, configure +Traffic Server to cache only objects with specific headers. By default, +Traffic Server caches all objects (including objects with no headers); +you should change the default setting only for specialized proxy +situations. If you configure Traffic Server to cache only HTTP objects +with ``Expires`` or ``max-age`` headers, then the cache hit rate will be +noticeably reduced (since very few objects will have explicit expiration +information). + +To configure Traffic Server to cache objects with specific headers + +1. Change the value for :ts:cv:`proxy.config.http.cache.required_headers`. + +2. Run the :option:`traffic_line -x` command to apply the configuration + changes. + +.. _cache-control-headers: + +Cache-Control Headers +--------------------- + +Even though an object might be fresh in the cache, clients or servers +often impose their own constraints that preclude retrieval of the object +from the cache. For example, a client might request that an object *not* +be retrieved from a cache, or if it does, then it cannot have been +cached for more than 10 minutes. Traffic Server bases the servability of +a cached object on ``Cache-Control`` headers that appear in both client +requests and server responses. 
The following ``Cache-Control`` headers +affect whether objects are served from cache: + +- The ``no-cache`` header, sent by clients, tells Traffic Server that + it should not serve any objects directly from the cache; + therefore, Traffic Server will always obtain the object from the + origin server. You can configure Traffic Server to ignore client + ``no-cache`` headers - refer to `Configuring Traffic Server to Ignore Client no-cache Headers`_ + for more information. + +- The ``max-age`` header, sent by servers, is compared to the object + age. If the age is less than ``max-age``, then the object is fresh + and can be served. + +- The ``min-fresh`` header, sent by clients, is an **acceptable + freshness tolerance**. This means that the client wants the object to + be at least this fresh. Unless a cached object remains fresh at least + this long in the future, it is revalidated. + +- The ``max-stale`` header, sent by clients, permits Traffic Server to + serve stale objects provided they are not too old. Some browsers + might be willing to take slightly stale objects in exchange for + improved performance, especially during periods of poor Internet + availability. + +Traffic Server applies ``Cache-Control`` servability criteria +***after*** HTTP freshness criteria. For example, an object might be +considered fresh but will not be served if its age is greater than its +``max-age``. + +Revalidating HTTP Objects +------------------------- + +When a client requests an HTTP object that is stale in the cache, +Traffic Server revalidates the object. A **revalidation** is a query to +the origin server to check if the object is unchanged. The result of a +revalidation is one of the following: + +- If the object is still fresh, then Traffic Server resets its + freshness limit and serves the object. 
+ +- If a new copy of the object is available, then Traffic Server caches + the new object (thereby replacing the stale copy) and simultaneously + serves the object to the client. + +- If the object no longer exists on the origin server, then Traffic + Server does not serve the cached copy. + +- If the origin server does not respond to the revalidation query, then + Traffic Server serves the stale object along with a + ``111 Revalidation Failed`` warning. + +By default, Traffic Server revalidates a requested HTTP object in the +cache if it considers the object to be stale. Traffic Server evaluates +object freshness as described in `HTTP Object Freshness`_. +You can reconfigure how Traffic Server evaluates freshness by selecting +one of the following options: + +- Traffic Server considers all HTTP objects in the cache to be stale: + always revalidate HTTP objects in the cache with the origin server. +- Traffic Server considers all HTTP objects in the cache to be fresh: + never revalidate HTTP objects in the cache with the origin server. +- Traffic Server considers all HTTP objects without ``Expires`` or + ``Cache-control`` headers to be stale: revalidate all HTTP objects + without ``Expires`` or ``Cache-Control`` headers. + +To configure how Traffic Server revalidates objects in the cache, you +can set specific revalidation rules in :file:`cache.config`. + +To configure revalidation options + +1. Edit the following variable in :file:`records.config` + + - :ts:cv:`proxy.config.http.cache.when_to_revalidate` + +2. Run the :option:`traffic_line -x` command to apply the configuration + changes. + +Scheduling Updates to Local Cache Content +========================================= + +To further increase performance and to ensure that HTTP objects are +fresh in the cache, you can use the **Scheduled Update** option. This +configures Traffic Server to load specific objects into the cache at +scheduled times. 
You might find this especially beneficial in a reverse +proxy setup, where you can *preload* content you anticipate will be in +demand. + +To use the Scheduled Update option, you must perform the following +tasks. + +- Specify the list of URLs that contain the objects you want to + schedule for update, +- the time the update should take place, +- and the recursion depth for the URL. +- Enable the scheduled update option and configure optional retry + settings. + +Traffic Server uses the information you specify to determine URLs for +which it is responsible. For each URL, Traffic Server derives all +recursive URLs (if applicable) and then generates a unique URL list. +Using this list, Traffic Server initiates an HTTP ``GET`` for each +unaccessed URL. It ensures that it remains within the user-defined +limits for HTTP concurrency at any given time. The system logs the +completion of all HTTP ``GET`` operations so you can monitor the +performance of this feature. + +Traffic Server also provides a **Force Immediate Update** option that +enables you to update URLs immediately without waiting for the specified +update time to occur. You can use this option to test your scheduled +update configuration (refer to `Forcing an Immediate Update`_). + +Configuring the Scheduled Update Option +--------------------------------------- + +To configure the scheduled update option + +1. Edit :file:`update.config` to + enter a line in the file for each URL you want to update. +2. Edit the following variables + + - :ts:cv:`proxy.config.update.enabled` + - :ts:cv:`proxy.config.update.retry_count` + - :ts:cv:`proxy.config.update.retry_interval` + - :ts:cv:`proxy.config.update.concurrent_updates` + +3. Run the :option:`traffic_line -x` command to apply the configuration + changes. + +Forcing an Immediate Update +--------------------------- + +Traffic Server provides a **Force Immediate Update** option that enables +you to immediately verify the URLs listed in :file:`update.config`. 
+The Force Immediate Update option disregards the offset hour and +interval set in :file:`update.config` and immediately updates the +URLs listed. + +To configure the Force Immediate Update option + +1. Edit the following variables + + - :ts:cv:`proxy.config.update.force` + - Make sure :ts:cv:`proxy.config.update.enabled` is set to 1. + +2. Run the command :option:`traffic_line -x` to apply the configuration + changes. + +.. important:: + + When you enable the Force Immediate Update option, Traffic Server continually updates the URLs specified in + :file:`update.config` until you disable the option. To disable the Force Immediate Update option, set + :ts:cv:`proxy.config.update.force` to ``0`` (zero). + +Pushing Content into the Cache +============================== + +Traffic Server supports the HTTP ``PUSH`` method of content delivery. +Using HTTP ``PUSH``, you can deliver content directly into the cache +without client requests. + +Configuring Traffic Server for PUSH Requests +-------------------------------------------- + +Before you can deliver content into your cache using HTTP ``PUSH``, you +must configure Traffic Server to accept ``PUSH`` requests. + +To configure Traffic Server to accept ``PUSH`` requests + +1. Edit :file:`ip_allow.config` to allow ``PUSH``. + +2. Edit the following variable in :file:`records.config`, enable + the push_method. + + - :ts:cv:`proxy.config.http.push_method_enabled` + +3. Run the command :option:`traffic_line -x` to apply the configuration + changes. + +Understanding HTTP PUSH +----------------------- + +``PUSH`` uses the HTTP 1.1 message format. The body of a ``PUSH`` +request contains the response header and response body that you want to +place in the cache. The following is an example of a ``PUSH`` request:: + + PUSH http://www.company.com HTTP/1.0 + Content-length: 84 + + HTTP/1.0 200 OK + Content-type: text/html + Content-length: 17 + + + a + + +.. 
important:: + + Your header must include ``Content-length`` - ``Content-length`` must include both ``header`` and ``body byte + count``. + +Tools that will help manage pushing +----------------------------------- + +There is a Perl script for pushing, :program:`tspush`, +which can help you understand how to write scripts for pushing +content yourself. + +Pinning Content in the Cache +============================ + +The **Cache Pinning Option** configures Traffic Server to keep certain +HTTP objects in the cache for a specified time. You can use this option +to ensure that the most popular objects are in cache when needed and to +prevent Traffic Server from deleting important objects. Traffic Server +observes ``Cache-Control`` headers and pins an object in the cache only +if it is indeed cacheable. + +To set cache pinning rules + +1. Make sure the following variable in :file:`records.config` is set + + - :ts:cv:`proxy.config.cache.permit.pinning` + +2. Add a rule in :file:`cache.config` for each + URL you want Traffic Server to pin in the cache. For example:: + + url_regex=^https?://(www.)?apache.org/dev/ pin-in-cache=12h + +3. Run the command :option:`traffic_line -x` to apply the configuration + changes. + +To Cache or Not to Cache? +========================= + +When Traffic Server receives a request for a web object that is not in +the cache, it retrieves the object from the origin server and serves it +to the client. At the same time, Traffic Server checks if the object is +cacheable before storing it in its cache to serve future requests. + +Caching HTTP Objects +==================== + +Traffic Server responds to caching directives from clients and origin +servers, as well as directives you specify through configuration options +and files. 
+ +Client Directives +----------------- + +By default, Traffic Server does *not* cache objects with the following +**request headers**: + +- ``Authorization``: header + +- ``Cache-Control: no-store`` header + +- ``Cache-Control: no-cache`` header + + To configure Traffic Server to ignore the ``Cache-Control: no-cache`` + header, refer to `Configuring Traffic Server to Ignore Client no-cache Headers`_ + +- ``Cookie``: header (for text objects) + + By default, Traffic Server caches objects served in response to + requests that contain cookies (unless the object is text). You can + configure Traffic Server to not cache cookied content of any type, + cache all cookied content, or cache cookied content that is of image + type only. For more information, refer to `Caching Cookied Objects`_. + +Configuring Traffic Server to Ignore Client no-cache Headers +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +By default, Traffic Server strictly observes client +``Cache-Control: no-cache`` directives. If a requested object contains a +``no-cache`` header, then Traffic Server forwards the request to the +origin server even if it has a fresh copy in cache. You can configure +Traffic Server to ignore client ``no-cache`` directives such that it +ignores ``no-cache`` headers from client requests and serves the object +from its cache. + +To configure Traffic Server to ignore client ``no-cache`` headers + +1. Edit the following variable in :file:`records.config` + + - :ts:cv:`proxy.config.http.cache.ignore_client_no_cache` + +2. Run the command :option:`traffic_line -x` to apply the configuration + changes. 
+ +Origin Server Directives +------------------------ + +By default, Traffic Server does *not* cache objects with the following +**response headers**: + +- ``Cache-Control: no-store`` header +- ``Cache-Control: private`` header +- ``WWW-Authenticate``: header + + To configure Traffic Server to ignore ``WWW-Authenticate`` headers, + refer to `Configuring Traffic Server to Ignore WWW-Authenticate Headers`_. + +- ``Set-Cookie``: header +- ``Cache-Control: no-cache`` headers + + To configure Traffic Server to ignore ``no-cache`` headers, refer to + `Configuring Traffic Server to Ignore Server no-cache Headers`_. + +- ``Expires``: header with value of 0 (zero) or a past date + +Configuring Traffic Server to Ignore Server no-cache Headers +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +By default, Traffic Server strictly observes ``Cache-Control: no-cache`` +directives. A response from an origin server with a ``no-cache`` header +is not stored in the cache and any previous copy of the object in the +cache is removed. If you configure Traffic Server to ignore ``no-cache`` +headers, then Traffic Server also ignores ``no-store`` headers. The +default behavior of observing ``no-cache`` directives is appropriate +in most cases. + +To configure Traffic Server to ignore server ``no-cache`` headers + +#. Edit the variable :ts:cv:`proxy.config.http.cache.ignore_server_no_cache` + +#. Run the command :option:`traffic_line -x` to apply the configuration + changes. + +Configuring Traffic Server to Ignore WWW-Authenticate Headers +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +By default, Traffic Server does not cache objects that contain +``WWW-Authenticate`` response headers. The ``WWW-Authenticate`` header +contains authentication parameters the client uses when preparing the +authentication challenge response to an origin server. 
+ +When you configure Traffic Server to ignore origin server +``WWW-Authenticate`` headers, all objects with ``WWW-Authenticate`` +headers are stored in the cache for future requests. However, the +default behavior of not caching objects with ``WWW-Authenticate`` +headers is appropriate in most cases. Only configure Traffic Server to +ignore server ``WWW-Authenticate`` headers if you are knowledgeable +about HTTP 1.1. + +To configure Traffic Server to ignore server ``WWW-Authenticate`` +headers + +#. Edit the variable :ts:cv:`proxy.config.http.cache.ignore_authentication` + +#. Run the command :option:`traffic_line -x` to apply the configuration + changes. + +Configuration Directives +------------------------ + +In addition to client and origin server directives, Traffic Server +responds to directives you specify through configuration options and +files. + +You can configure Traffic Server to do the following: + +- *Not* cache any HTTP objects (refer to `Disabling HTTP Object Caching`_). +- Cache **dynamic content** - that is, objects with URLs that end in + ``.asp`` or contain a question mark (``?``), semicolon + (**``;``**), or **``cgi``**. For more information, refer to `Caching Dynamic Content`_. +- Cache objects served in response to the ``Cookie:`` header (refer to + `Caching Cookied Objects`_. +- Observe ``never-cache`` rules in the :file:`cache.config` file. + +Disabling HTTP Object Caching +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +By default, Traffic Server caches all HTTP objects except those for +which you have set ``never-cache`` as :ref:`action rules ` +in the :file:`cache.config` file. You can disable HTTP object +caching so that all HTTP objects are served directly from the origin +server and never cached, as detailed below. + +To disable HTTP object caching manually + +1. Set the variable :ts:cv:`proxy.config.http.enabled` to ``0``. + +2. Run the command :option:`traffic_line -x` to apply the configuration + changes. 
+ +Caching Dynamic Content +~~~~~~~~~~~~~~~~~~~~~~~ + +A URL is considered **dynamic** if it ends in **``.asp``** or contains a +question mark (``?``), a semicolon (``;``), or ``cgi``. By +default, Traffic Server caches dynamic content. You can configure the +system to ignore dynamic-looking content, although this is recommended +only if the content is *truly* dynamic, but fails to advertise so with +appropriate ``Cache-Control`` headers. + +To configure Traffic Server's cache behavior with regard to dynamic +content + +1. Edit the following variable in :file:`records.config` + + - :ts:cv:`proxy.config.http.cache.cache_urls_that_look_dynamic` + +2. Run the command :option:`traffic_line -x` to apply the configuration + changes. + +Caching Cookied Objects +~~~~~~~~~~~~~~~~~~~~~~~ + +.. XXX This should be extended to xml as well! + +By default, Traffic Server caches objects served in response to requests +that contain cookies. This is true for all types of objects except for +text. Traffic Server does not cache cookied text content because object +headers are stored along with the object, and personalized cookie header +values could be saved with the object. With non-text objects, it is +unlikely that personalized headers are delivered or used. + +You can reconfigure Traffic Server to: + +- *Not* cache cookied content of any type. +- Cache cookied content that is of image type only. +- Cache all cookied content regardless of type. + +To configure how Traffic Server caches cookied content + +1. Edit the variable :ts:cv:`proxy.config.http.cache.cache_responses_to_cookies` + +2. Run the command :option:`traffic_line -x` to apply the configuration + changes. + +Forcing Object Caching +====================== + +You can force Traffic Server to cache specific URLs (including dynamic +URLs) for a specified duration, regardless of ``Cache-Control`` response +headers. + +To force document caching + +1. 
Add a rule for each URL you want Traffic Server to pin to the cache + :file:`cache.config`:: + + url_regex=^https?://(www.)?apache.org/dev/ ttl-in-cache=6h + +2. Run the command :option:`traffic_line -x` to apply the configuration + changes. + +Caching HTTP Alternates +======================= + +Some origin servers answer requests to the same URL with a variety of +objects. The content of these objects can vary widely, according to +whether a server delivers content for different languages, targets +different browsers with different presentation styles, or provides +different document formats (HTML, XML). Different versions of the same +object are termed **alternates** and are cached by Traffic Server based +on ``Vary`` response headers. You can specify additional request and +response headers for specific ``Content-Type``\s that Traffic Server +will identify as alternates for caching. You can also limit the number +of alternate versions of an object allowed in the cache. + +Configuring How Traffic Server Caches Alternates +------------------------------------------------ + +To configure how Traffic Server caches alternates, follow the steps +below + +1. Edit the following variables + + - :ts:cv:`proxy.config.http.cache.enable_default_vary_headers` + - :ts:cv:`proxy.config.http.cache.vary_default_text` + - :ts:cv:`proxy.config.http.cache.vary_default_images` + - :ts:cv:`proxy.config.http.cache.vary_default_other` + +2. Run the command :option:`traffic_line -x` to apply the configuration + changes. + +.. note:: + + If you specify ``Cookie`` as the header field on which to vary + in the above variables, make sure that the variable + :ts:cv:`proxy.config.http.cache.cache_responses_to_cookies` + is set appropriately. + +Limiting the Number of Alternates for an Object +----------------------------------------------- + +You can limit the number of alternates Traffic Server can cache per +object (the default is 3). + +.. 
important:: + + Large numbers of alternates can affect Traffic Server + cache performance because all alternates have the same URL. Although + Traffic Server can look up the URL in the index very quickly, it must + scan sequentially through available alternates in the object store. + + To limit the number of alternates + + #. Edit the variable :ts:cv:`proxy.config.cache.limits.http.max_alts` + #. Run the command :option:`traffic_line -x` to apply the configuration changes. + + +.. _using-congestion-control: + +Using Congestion Control +======================== + +The **Congestion Control** option enables you to configure Traffic +Server to stop forwarding HTTP requests to origin servers when they +become congested. Traffic Server then sends the client a message to +retry the congested origin server later. + +To use the **Congestion Control** option, you must perform the following +tasks: + +#. Set the variable :ts:cv:`proxy.config.http.congestion_control.enabled` to ``1`` + + - Create rules in the :file:`congestion.config` file to specify: + - which origin servers Traffic Server tracks for congestion + - the timeouts Traffic Server uses, depending on whether a server is + congested + - the page Traffic Server sends to the client when a server becomes + congested + - if Traffic Server tracks the origin servers per IP address or per + hostname + +#. Run the command :option:`traffic_line -x` to apply the configuration + changes. + +.. _transaction-buffering-control: + +Using Transaction Buffering Control +=================================== + +By default I/O operations are run at full speed, as fast as either Traffic Server, the network, or the cache can go. +This can be problematic for large objects if the client side connection is significantly slower. In such cases the +content will be buffered in ram while waiting to be sent to the client. 
This could potentially also happen for ``POST`` +requests if the client connection is fast and the origin server connection slow. If very large objects are being used +this can cause the memory usage of Traffic Server to become `very large +`_. + +This problem can be ameliorated by controlling the amount of buffer space used by a transaction. A high water and low +water mark are set in terms of bytes used by the transaction. If the buffer space in use exceeds the high water mark, +the connection is throttled to prevent additional external data from arriving. Internal operations continue to proceed +at full speed until the buffer space in use drops below the low water mark and external data I/O is re-enabled. + +Although this is intended primarily to limit the memory usage of Traffic Server it can also serve as a crude rate +limiter by setting a buffer limit and then throttling the client side connection either externally or via a transform. +This will cause the connection to the origin server to be limited to roughly the client side connection speed. + +Traffic Server does network I/O in large chunks (32K or so) and therefore the granularity of transaction buffering +control is limited to a similar precision. + +The buffer size calculations include all elements in the transaction, including any buffers associated with :ref:`transform plugins `. + +Transaction buffering control can be enabled globally by using configuration variables or by :c:func:`TSHttpTxnConfigIntSet` in a plugin. 
+ +================= ================================================== ================================================ +Value Variable :c:func:`TSHttpTxnConfigIntSet` key +================= ================================================== ================================================ +Enable buffering :ts:cv:`proxy.config.http.flow_control.enabled` :c:data:`TS_CONFIG_HTTP_FLOW_CONTROL_ENABLED` +Set high water :ts:cv:`proxy.config.http.flow_control.high_water` :c:data:`TS_CONFIG_HTTP_FLOW_CONTROL_HIGH_WATER` +Set low water :ts:cv:`proxy.config.http.flow_control.low_water` :c:data:`TS_CONFIG_HTTP_FLOW_CONTROL_LOW_WATER` +================= ================================================== ================================================ + +Be careful to always have the low water mark equal or less than the high water mark. If you set only one, the other will +be set to the same value. + +If using :c:func:`TSHttpTxnConfigIntSet`, it must be called no later than :c:data:`TS_HTTP_READ_RESPONSE_HDR_HOOK`. + +.. _reducing-origin-server-requests-avoiding-the-thundering-herd: + +Reducing Origin Server Requests (Avoiding the Thundering Herd) +============================================================== + +When an object can not be served from cache, the request will be proxied to the origin server. For a popular object, +this can result in many near simultaneous requests to the origin server, potentially overwhelming it or associated +resources. There are several features in Traffic Server that can be used to avoid this scenario. + +Read While Writer +----------------- +When Traffic Server goes to fetch something from origin, and upon receiving the response, any number of clients can be allowed to start serving the partially filled cache object once background_fill_completed_threshold % of the object has been received. The difference is that Squid allows this as soon as it goes to origin, whereas ATS can not do it until we get the complete response header. 
The reason for this is that we make no distinction between cache refresh, and cold cache, so we have no way to know if a response is going to be cacheable, and therefore allow read-while-writer functionality. + +The configurations necessary to enable this in ATS are: + +| CONFIG :ts:cv:`proxy.config.cache.enable_read_while_writer` ``INT 1`` +| CONFIG :ts:cv:`proxy.config.http.background_fill_active_timeout` ``INT 0`` +| CONFIG :ts:cv:`proxy.config.http.background_fill_completed_threshold` ``FLOAT 0.000000`` +| CONFIG :ts:cv:`proxy.config.cache.max_doc_size` ``INT 0`` + +All four configurations are required, for the following reasons: + +- enable_read_while_writer turns the feature on. It's off (0) by default +- The background fill feature should be allowed to kick in for every possible request. This is necessary, in case the writer ("first client session") goes away, someone needs to take over the session. Hence, you should set the background fill timeouts and threshold to zero; this ensures they never time out and the background fill is always allowed to kick in. +- The proxy.config.cache.max_doc_size should be unlimited (set to 0), since the object size may be unknown, and going over this limit would cause a disconnect on the objects being served. + +Once all this is enabled, you have something that is very close to, but not quite the same as, Squid's Collapsed Forwarding. + + + +.. _fuzzy-revalidation: + +Fuzzy Revalidation +------------------ +Traffic Server can be set to attempt to revalidate an object before it becomes stale in cache. 
:file:`records.config` contains the settings: + +| CONFIG :ts:cv:`proxy.config.http.cache.fuzz.time` ``INT 240`` +| CONFIG :ts:cv:`proxy.config.http.cache.fuzz.min_time` ``INT 0`` +| CONFIG :ts:cv:`proxy.config.http.cache.fuzz.probability` ``FLOAT 0.005`` + +For every request for an object that occurs "fuzz.time" before (in the example above, 240 seconds) the object is set to become stale, there is a small +chance (fuzz.probability == 0.5%) that the request will trigger a revalidation request to the origin. For objects getting a few requests per second, this would likely not trigger, but then this feature is not necessary anyway, since odds are only 1 or a small number of connections would hit origin upon objects going stale. The defaults are a good compromise, for objects getting roughly 4 requests / second or more, it's virtually guaranteed to trigger a revalidate event within the 240s. These configs are also overridable per remap rule or via a plugin, so can be adjusted per request if necessary. + +Note that if the revalidation occurs, the requested object is no longer available to be served from cache. Subsequent +requests for that object will be proxied to the origin. + +Finally, the fuzz.min_time is there to be able to handle requests with a TTL less than fuzz.time – it allows for different times to evaluate the probability of revalidation for small TTLs and big TTLs. Objects with small TTLs will start "rolling the revalidation dice" near the fuzz.min_time, while objects with large TTLs would start at fuzz.time. A logarithmic-like function determines the revalidation evaluation start time (which will be between fuzz.min_time and fuzz.time). As the object gets closer to expiring, the window start becomes more likely. By default this setting is not enabled, but should be enabled anytime you have objects with small TTLs. 
Note that this option predates overridable configurations, so you can achieve something similar with a plugin or remap.config conf_remap.so configs. + +These configurations are similar to Squid's refresh_stale_hit configuration option. + + +Open Read Retry Timeout +----------------------- + +The open read retry configurations attempt to reduce the number of concurrent requests to the origin for a given object. While an object is being fetched from the origin server, subsequent requests would wait open_read_retry_time milliseconds before checking if the object can be served from cache. If the object is still being fetched, the subsequent requests will retry max_open_read_retries times. Thus, subsequent requests may wait a total of (max_open_read_retries x open_read_retry_time) milliseconds before establishing an origin connection of their own. For instance, if they are set to 5 and 10 respectively, connections will wait up to 50ms for a response to come back from origin from a previous request, until this request is allowed through. + +These settings are inappropriate when objects are uncacheable. In those cases, requests for an object effectively become serialized. The subsequent requests would await at least open_read_retry_time milliseconds before being proxied to the origin. + +Similarly, this setting should be used in conjunction with Read While Writer for big (those that take longer than (max_open_read_retries x open_read_retry_time) milliseconds to transfer) cacheable objects. Without the read-while-writer settings enabled, while the initial fetch is ongoing, not only would subsequent requests be delayed by the maximum time, but also, those requests would result in another request to the origin server. + +Since ATS now supports setting these settings per-request or remap rule, you can configure this to be suitable for your setup much more easily. 
+ +The configurations are (with defaults): + +| CONFIG :ts:cv:`proxy.config.http.cache.max_open_read_retries` ``INT -1`` +| CONFIG :ts:cv:`proxy.config.http.cache.open_read_retry_time` ``INT 10`` + +The default means that the feature is disabled, and every connection is allowed to go to origin instantly. When enabled, you will try max_open_read_retries times, each with a open_read_retry_time timeout. diff --git a/doc/admin/index.en.rst b/doc/admin/index.en.rst new file mode 100644 index 00000000..3b1229b7 --- /dev/null +++ b/doc/admin/index.en.rst @@ -0,0 +1,319 @@ +.. _admin-guide: + +Administrators' Guide +********************** + +.. Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, + software distributed under the License is distributed on an + "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied. See the License for the + specific language governing permissions and limitations + under the License. + + +Apache Traffic Server™ speeds Internet access, enhances website +performance, and delivers unprecedented web hosting capabilities. + +This chapter discusses how: + +Contents: + +.. 
toctree:: + :maxdepth: 2 + + getting-started.en + http-proxy-caching.en + reverse-proxy-http-redirects.en + forward-proxy.en + transparent-proxy.en + explicit-proxy-caching.en + session-protocol.en + hierachical-caching.en + configuring-cache.en + monitoring-traffic.en + configuring-traffic-server.en + cluster-howto.en + security-options.en + working-log-files.en + event-logging-formats.en + traffic-server-error-messages.en + performance-tuning.en + faqs.en + + +What Is Apache Traffic Server? +============================== + +Global data networking has become part of everyday life: Internet users +request billions of documents and terabytes of data, on a daily basis, +to and from all parts of the world. Information is free, abundant, and +accessible. Unfortunately, global data networking can also be a +nightmare for IT professionals as they struggle with overloaded servers +and congested networks. It can be challenging to consistently and +reliably accommodate society’s growing data demands. + +Traffic Server is a high-performance web proxy cache that improves +network efficiency and performance by caching frequently-accessed +information at the edge of the network. This brings content physically +closer to end users, while enabling faster delivery and reduced +bandwidth use. Traffic Server is designed to improve content delivery +for enterprises, Internet service providers (ISPs), backbone providers, +and large intranets by maximizing existing and available bandwidth. + +Traffic Server Deployment Options +================================= + +To best suit your needs, Traffic Server can be deployed in several ways: + +- As a web proxy cache +- As a reverse proxy +- In a cache hierarchy + +The following sections provide a summary of these Traffic Server +deployment options. Please keep in mind that with every of these options +Traffic Server can be run as a *single instance*, or as a *multi-node +cluster*. 
+ +Traffic Server as a Web Proxy Cache +----------------------------------- + +As a web proxy cache, Traffic Server receives user requests for web +content as those requests travel to the destined web server (origin +server). If Traffic Server contains the requested content, then it +serves the content directly. If the requested content is not available +from cache, then Traffic Server acts as a proxy: it obtains the content +from the origin server on the user’s behalf and also keeps a copy to +satisfy future requests. + +Traffic Server provides explicit proxy caching, in which the user’s +client software must be configured to send requests directly to Traffic +Server. Explicit proxy caching is described in the :ref:`explicit-proxy-caching` +chapter. + + +Traffic Server as a Reverse Proxy +--------------------------------- + +As a reverse proxy, Traffic Server is configured to be the origin server +to which the user is trying to connect (typically, the origin server’s +advertised hostname resolves to Traffic Server, which acts as the real +origin server). The reverse proxy feature is also called server +acceleration. Reverse proxy is described in more detail in :ref:`reverse-proxy-and-http-redirects`. + +Traffic Server in a Cache Hierarchy +----------------------------------- + +Traffic Server can participate in flexible cache hierarchies, in which +Internet requests not fulfilled from one cache are routed to other +regional caches, thereby leveraging the contents and proximity of nearby +caches. In a hierarchy of proxy servers, Traffic Server can act either +as a parent or a child cache to other Traffic Server systems or to +similar caching products. + +Traffic Server supports ICP (Internet Cache Protocol) peering. +Hierarchical caching is described in more detail in :ref:`hierarchical-caching`. + +Deployment Limitations +---------------------- + +There's a number of deployment options that Traffic Server does not support right out +of the box. 
Such functionality may
+ +The Host Database +----------------- + +The Traffic Server host database stores the domain name server (DNS) +entries of origin servers to which Traffic Server connects to fulfill +user requests. This information is used to adapt future protocol +interactions and optimize performance. Along with other information, the +host database tracks: + +- DNS information (for fast conversion of hostnames to IP addresses) +- The HTTP version of each host (so advanced protocol features can be + used with hosts running modern servers) +- Host reliability and availability information (so users will not wait + for servers that are not running) + +The DNS Resolver +---------------- + +Traffic Server includes a fast, asynchronous DNS resolver to streamline +conversion of hostnames to IP addresses. Traffic Server implements the +DNS resolver natively by directly issuing DNS command packets rather +than relying on slower, conventional resolver libraries. Since many DNS +queries can be issued in parallel and a fast DNS cache maintains popular +bindings in memory, DNS traffic is reduced. + +Traffic Server Processes +------------------------ + +Traffic Server contains three processes that work together to serve +requests and manage/control/monitor the health of the system. The three +processes are described below: + +- The :program:`traffic_server` process is the transaction processing engine + of Traffic Server. It is responsible for accepting connections, + processing protocol requests, and serving documents from the cache or + origin server. + +- The :program:`traffic_manager` process is the command and control facility + of the Traffic Server, responsible for launching, monitoring, and + reconfiguring the :program:`traffic_server` process. The :program:`traffic_manager` + process is also responsible for the proxy autoconfiguration port, the + statistics interface, cluster administration, and virtual IP + failover. 
+ + If the :program:`traffic_manager` process detects a :program:`traffic_server` + process failure, it instantly restarts the process but also maintains + a connection queue of all incoming requests. All incoming connections + that arrive in the several seconds before full server restart are + saved in the connection queue and processed in first-come, + first-served order. This connection queueing shields users from any + server restart downtime. + +- The :program:`traffic_cop` process monitors the health of both the + :program:`traffic_server` and :program:`traffic_manager` processes. The + :program:`traffic_cop` process periodically (several times each minute) + queries the :program:`traffic_server` and :program:`traffic_manager` process by + issuing heartbeat requests to fetch synthetic web pages. In the event + of failure (if no response is received within a timeout interval or + if an incorrect response is received), :program:`traffic_cop` restarts the + :program:`traffic_manager` and :program:`traffic_server` processes. + +The figure below illustrates the three Traffic Server processes. + +.. figure:: ../static/images/admin/process.jpg + :align: center + :alt: Illustration of the three Traffic Server Processes + + Illustration of the three Traffic Server Processes + +Administration Tools +-------------------- + +Traffic Server offers the following administration options: + +- The Traffic Line command-line interface is a text-based interface + from which you can monitor Traffic Server performance and network + traffic, as well as configure the Traffic Server system. From Traffic + Line, you can execute individual commands or script a series of + commands in a shell. +- The Traffic Shell command-line interface is an additional + command-line tool that enables you to execute individual commands + that monitor and configure the Traffic Server system. 
+- Various configuration files enable you to configure Traffic Server + through a simple file-editing and signal-handling interface. Any + changes you make through Traffic Line or Traffic Shell are + automatically made to the configuration files as well. +- Finally there is a clean C API which can be put to good use from a + multitude of languages. The Traffic Server Admin Client demonstrates + this for Perl. + +Traffic Analysis Options +======================== + +Traffic Server provides several options for network traffic analysis and +monitoring: + +- Traffic Line and Traffic Shell enable you to collect and process + statistics obtained from network traffic information. + +- Transaction logging enables you to record information (in a log file) + about every request Traffic Server receives and every error it + detects. By analyzing the log files, you can determine how many + clients used the Traffic Server cache, how much information each of + them requested, and what pages were most popular. You can also see + why a particular transaction was in error and what state the Traffic + Server was in at a particular time; for example, you can see that + Traffic Server was restarted or that cluster communication timed out. + + Traffic Server supports several standard log file formats, such as + Squid and Netscape, and its own custom format. You can analyze the + standard format log files with off-the-shelf analysis packages. To + help with log file analysis, you can separate log files so that they + contain information specific to protocol or hosts. + +Traffic analysis options are described in more detail in :ref:`monitoring-traffic`. + +Traffic Server logging options are described in :ref:`working-with-log-files`. + +Traffic Server Security Options +=============================== + +Traffic Server provides numerous options that enable you to establish +secure communication between the Traffic Server system and other +computers on the network. 
Using the security options, you can do the +following: + +- Control client access to the Traffic Server proxy cache. +- Configure Traffic Server to use multiple DNS servers to match your + site's security configuration. For example, Traffic Server can use + different DNS servers, depending on whether it needs to resolve + hostnames located inside or outside a firewall. This enables you to + keep your internal network configuration secure while continuing to + provide transparent access to external sites on the Internet. +- Configure Traffic Server to verify that clients are authenticated + before they can access content from the Traffic Server cache. +- Secure connections in reverse proxy mode between a client and Traffic + Server, and Traffic Server and the origin server, using the SSL + termination option. +- Control access via SSL (Secure Sockets Layer). + +Traffic Server security options are described in more detail in +:ref:`security-options`. + +Tuning Traffic Server +===================== + +Finally this last chapter on :ref:`performance-tuning` discusses the vast +number of options that allow to optimally tune Apache Traffic Server for +maximum performance. diff --git a/doc/admin/monitoring-traffic.en.rst b/doc/admin/monitoring-traffic.en.rst new file mode 100644 index 00000000..40820c2a --- /dev/null +++ b/doc/admin/monitoring-traffic.en.rst @@ -0,0 +1,104 @@ + +.. _monitoring-traffic: + +Monitoring Traffic +****************** + +.. Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. 
You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, + software distributed under the License is distributed on an + "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied. See the License for the + specific language governing permissions and limitations + under the License. + +Traffic Server provides several options for monitoring system +performance and analyzing network traffic. + +.. toctree:: + :maxdepth: 2 + +Traffic Server Monitoring Tools +=============================== + +Traffic Server provides the following tools to monitor system +performance and analyze network traffic: + +- Traffic Server can send email that's triggered by alarms that signal + any detected failure conditions; refer to `Working with Traffic Manager Alarms`_. +- The Traffic Line command-line interface provides an alternative + method of viewing Traffic Server performance and network traffic + information; refer to `Viewing Statistics from Traffic Line`_. +- The Traffic Shell command-line tool provides yet another alternative + method of viewing Traffic Server performance and network traffic + information; refer to `Starting Traffic Shell <../getting-started#StartTrafficShell>`_. + +.. XXX: *someone* seems to have deleted the traffic_shell docs, I'm suspecting igalic, btw. // igalic + +Working with Traffic Manager Alarms +=================================== + +Traffic Server signals an alarm when it detects a problem. For example, +the space allocated to event logs could be full or Traffic Server may +not be able to write to a configuration file. + +Configuring Traffic Server to Email Alarms +------------------------------------------ + +To configure Traffic Server to send an email to a specific address +whenever an alarm occurs, follow the steps below: + +1. In the :file:`records.config` file +2. 
Set the :ts:cv:`proxy.config.alarm_email` variable to the email address alarms will be routed to. +3. Run the command :option:`traffic_line -x` to apply the configuration + changes. + +Using a Script File for Alarms +------------------------------ + +Alarm messages are built into Traffic Server - you cannot change them. +However, you can write a script file to execute certain actions when an +alarm is signaled. Traffic Server provides a sample script file named +``example_alarm_bin.sh`` in the ``bin`` directory; simply modify the +file to suit your needs. + +Viewing Statistics from Traffic Line +==================================== + +You can use the Traffic Line command-line interface to view statistics +about Traffic Server performance and web traffic. In addition to viewing +statistics, you can also configure, stop, and restart the Traffic Server +system. For additional information, refer to :ref:`configure-using-traffic-line` +and :ref:`traffic-line-commands`. You can view +specific information about a Traffic Server node or cluster by +specifying the variable that corresponds to the statistic you want to +see. + +**To view a statistic**, enter the following command::: + + traffic_line -r variable + +where ``variable`` is the variable representing the information you +want to view. For a list of variables you can specify, refer to :ref:`Traffic +Line Variables `. + +For example, the following command displays the document hit rate for +the Traffic Server node::: + + traffic_line -r proxy.node.cache_hit_ratio + +If the Traffic Server ``bin`` directory is not in your path, then +prepend the Traffic Line command with ``./`` (for example: +:option:`traffic_line -r` ``variable``). + + +.. XXX: We're missing docs on how to use traffic_top here. diff --git a/doc/admin/performance-tuning.en.rst b/doc/admin/performance-tuning.en.rst new file mode 100644 index 00000000..1d9efc02 --- /dev/null +++ b/doc/admin/performance-tuning.en.rst @@ -0,0 +1,54 @@ + +.. 
_performance-tuning: + +Performance Tuning +****************** + +.. Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, + software distributed under the License is distributed on an + "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied. See the License for the + specific language governing permissions and limitations + under the License. + + +.. toctree:: + :maxdepth: 2 + +Before you start +================ + +There is no single option to that will guarantee maximum performance of +Apache Traffic Server in every use-case. There are however numerous options +that help tune its performance under different loads and in its - often +vastly different - use-cases. + +Building Traffic Server +======================= + +A lot of speed can be gained or lost depending on the way ATS is built. + +Tuning the Machine +================== + +Operating Systems Options +========================= + +Optimal Use of Memory +===================== + +Tuning different Thread types +============================= + +Tuning Plugin Execution +======================= diff --git a/doc/admin/reverse-proxy-http-redirects.en.rst b/doc/admin/reverse-proxy-http-redirects.en.rst new file mode 100644 index 00000000..683e423a --- /dev/null +++ b/doc/admin/reverse-proxy-http-redirects.en.rst @@ -0,0 +1,317 @@ + +.. _reverse-proxy-and-http-redirects: + +Reverse Proxy and HTTP Redirects +******************************** + +.. 
Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, + software distributed under the License is distributed on an + "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied. See the License for the + specific language governing permissions and limitations + under the License. + + +Reverse Proxy and HTTP Redirects +================================ + +As a reverse proxy cache, Traffic Server serves requests on behalf of +origin servers. Traffic Server is configured in such a way that it +appears to clients like a normal origin server. + +.. toctree:: + :maxdepth: 2 + + +Understanding Reverse Proxy Caching +=================================== + +With **forward proxy caching**, Traffic Server handles web requests to +distant origin servers on behalf of the clients requesting the content. +**Reverse proxy caching** (also known as **server acceleration** or +**virtual web hosting**) is different because Traffic Server acts as a +proxy cache on behalf of the origin servers that store the content. +Traffic Server is configured to be *the* origin server which the client +is trying to connect to. In a typical scenario the advertised hostname +of the origin server resolves to Traffic Server, which acts as the real +origin server. + +Reverse Proxy Solutions +----------------------- + +There are many ways to use Traffic Server as a reverse proxy. Below are +a few example scenarios. 
+ +You can use Traffic Server in reverse proxy mode to: + +- Offload heavily-used origin servers +- Deliver content efficiently in geographically distant areas +- Provide security for origin servers that contain sensitive + information + +Offloading Heavily-Used Origin Servers +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Traffic Server can absorb requests to the main origin server and improve +the speed & quality of web serving by reducing load and hot spots on +backup origin servers. For example, a web hoster can maintain a scalable +Traffic Server serving engine with a set of low-cost, low-performance, +less-reliable PC origin servers as backup servers. In fact, a single +Traffic Server can act as the virtual origin server for multiple backup +origin servers, as shown in the figure below. + +.. figure:: ../static/images/admin/revproxy.jpg + :align: center + :alt: Traffic Server as reverse proxy for a pair of origin servers + + Traffic Server as reverse proxy for a pair of origin servers + +Delivering Content in Geographically-Dispersed Areas +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Traffic Server can be used in reverse proxy mode to accelerate origin +servers that provide content to areas not located within close +geographical proximity. Caches are typically easier to manage and are +more cost-effective than replicating data. For example, Traffic Server +can be used as a mirror site on the far side of a trans-Atlantic link to +serve users without having to fetch the request and content across +expensive international connections. Unlike replication, for which +hardware must be configured to replicate all data and to handle peak +capacity, Traffic Server dynamically adjusts to optimally use the +serving and storing capacity of the hardware. Traffic Server is also +designed to keep content fresh automatically, thereby eliminating the +complexity of updating remote origin servers. 
+ +Providing Security for an Origin Server +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Traffic Server can be used in reverse proxy mode to provide security for +an origin server. If an origin server contains sensitive information +that you want to keep secure inside your firewall, then you can use a +Traffic Server outside the firewall as a reverse proxy for that origin +server. When outside clients try to access the origin server, the +requests instead go to Traffic Server. If the desired content is *not* +sensitive, then it can be served from the cache. If the content is +sensitive and not cacheable, then Traffic Server obtains the content +from the origin server (the firewall allows only Traffic Server access +to the origin server). The sensitive content resides on the origin +server, safely inside the firewall. + +How Does Reverse Proxy Work? +---------------------------- + +When a browser makes a request, it normally sends that request directly +to the origin server. When Traffic Server is in reverse proxy mode, it +intercepts the request before it reaches the origin server. Typically, +this is done by setting up the DNS entry for the origin server (i.e., +the origin server's 'advertised' hostname) so it resolves to the Traffic +Server IP address. When Traffic Server is configured as the origin +server, the browser connects to Traffic Server rather than the origin +server. For additional information, see `HTTP Reverse Proxy`_. + +.. note:: To avoid a DNS conflict, the origin server’s hostname and its advertised hostname must not be the same. + +HTTP Reverse Proxy +================== + +In reverse proxy mode, Traffic Server serves HTTP requests on behalf of +a web server. The figure below illustrates how Traffic Server in reverse +proxy mode serves an HTTP request from a client browser. + +.. figure:: ../static/images/admin/httprvs.jpg + :align: center + :alt: HTTP reverse proxy + + HTTP reverse proxy + +The figure above demonstrates the following steps: + +1. 
A client browser sends an HTTP request addressed to a host called + ``www.host.com`` on port 80. Traffic Server receives the request + because it is acting as the origin server (the origin server’s + advertised hostname resolves to Traffic Server). +2. Traffic Server locates a map rule in the ``remap.config`` file and + remaps the request to the specified origin server (``realhost.com``). +3. Traffic Server opens an HTTP connection to the origin server. (If the request is not able to be served from cache) +4. If the request is a cache hit and the content is fresh, then Traffic + Server sends the requested object to the client from the cache. + Otherwise, Traffic Server obtains the requested object from the + origin server, sends the object to the client, and saves a copy in + its cache. + +To configure HTTP reverse proxy, you must perform the following tasks: + +- Create mapping rules in the :file:`remap.config` file (refer to `Creating + Mapping Rules for HTTP Requests`_). :: + + # remap.config + map http://www.host.com http://realhost.com + +- Enable the reverse proxy option (refer to `Enabling HTTP Reverse Proxy`_). + +In addition to the tasks above, you can also `Setting Optional HTTP Reverse Proxy Options`_. + +Handling Origin Server Redirect Responses +----------------------------------------- + +Origin servers often send redirect responses back to browsers +redirecting them to different pages. For example, if an origin server is +overloaded, then it might redirect browsers to a less loaded server. +Origin servers also redirect when web pages that have moved to different +locations. When Traffic Server is configured as a reverse proxy, it must +readdress redirects from origin servers so that browsers are redirected +to Traffic Server and *not* to another origin server. + +To readdress redirects, Traffic Server uses reverse-map rules. 
Unless +you have :ts:cv:`proxy.config.url_remap.pristine_host_hdr` enabled +(the default) you should generally set up a reverse-map rule for +each map rule. To create reverse-map rules, refer to `Using Mapping +Rules for HTTP Requests`_. + +Using Mapping Rules for HTTP Requests +------------------------------------- + +Traffic Server uses two types of mapping rules for HTTP reverse proxy. + +map rule +~~~~~~~~ + +A **map rule** translates the URL in client requests into the URL where +the content is located. When Traffic Server is in reverse proxy mode and +receives an HTTP client request, it first constructs a complete request +URL from the relative URL and its headers. Traffic Server then looks for +a match by comparing the complete request URL with its list of target +URLs in the :file:`remap.config` file. +For the request URL to match a target URL, the following +conditions must be true: + +- The scheme of both URLs must be the same +- The host in both URLs must be the same. If the request URL contains + an unqualified hostname, then it will never match a target URL with a + fully-qualified hostname. +- The ports in both URLs must be the same. If no port is specified in a + URL, then the default port for the scheme of the URL is used. +- The path portion of the target URL must match a prefix of the request + URL path + +If Traffic Server finds a match, then it translates the request URL into +the replacement URL listed in the map rule: it sets the host and path of +the request URL to match the replacement URL. If the URL contains path +prefixes, then Traffic Server removes the prefix of the path that +matches the target URL path and substitutes it with the path from the +replacement URL. If two mappings match a request URL, then Traffic +Server applies the first mapping listed in the :file:`remap.config` file. 
+ +reverse-map rule +~~~~~~~~~~~~~~~~ + +A **reverse-map rule** translates the URL in origin server redirect +responses to point to Traffic Server so that clients are **redirected** +to Traffic Server instead of accessing an origin server directly. For +example, if there is a directory ``/pub`` on an origin server at +``www.molasses.com`` and a client sends a request to that origin server +for ``/pub``, then the origin server might reply with a redirect by +sending the Header ``Location: http://www.test.com/pub/`` to let the +client know that it was a directory it had requested, not a document (a +common use of redirects is to normalize URLs so that clients can +bookmark documents properly). + +Traffic Server uses ``reverse_map`` rules to prevent clients (that +receive redirects from origin servers) from bypassing Traffic Server and +directly accessing the origin servers. In many cases the client would be +hitting a wall because ``realhost.com`` actually does not resolve for +the client. (E.g.: Because it's running on a port shielded by a +firewall, or because it's running on a non-routable LAN IP) + +Both map and reverse-map rules consist of a **target** (origin) URL and +a **replacement** (destination) URL. In a **map rule**, the target URL +points to Traffic Server and the replacement URL specifies where the +original content is located. In a **reverse-map rule**, the target URL +specifies where the original content is located and the replacement URL +points to Traffic Server. Traffic Server stores mapping rules in the +``remap.config`` file located in the Traffic Server ``config`` +directory. + +Creating Mapping Rules for HTTP Requests +---------------------------------------- + +To create mapping rules + +1. Enter the map and reverse-map rules into the :file:`remap.config` file +2. Run the command :option:`traffic_line -x` to apply the configuration + changes. 
+ +Enabling HTTP Reverse Proxy +--------------------------- + +To enable HTTP reverse proxy, follow the steps below. + +1. Edit the following variable in :file:`records.config` + + - :ts:cv:`proxy.config.reverse_proxy.enabled` + +2. Run the command :option:`traffic_line -x` to apply the configuration + changes. + +Setting Optional HTTP Reverse Proxy Options +------------------------------------------- + +Traffic Server provides several reverse proxy configuration options in +:file:`records.config` that enable you to: + +- Configure Traffic Server to retain the client host header information + in a request during translation (:ts:cv:`proxy.config.url_remap.pristine_host_hdr`) + +- Configure Traffic Server to serve requests only to the origin servers + listed in the mapping rules. As a result, requests to origin servers + not listed in the mapping rules are not served. (:ts:cv:`proxy.config.url_remap.remap_required`) +- Specify an alternate URL to which incoming requests from older + clients (i.e., ones that do not provide ``Host`` headers) are + directed (:ts:cv:`proxy.config.header.parse.no_host_url_redirect`) + +Don't forget to run the command :option:`traffic_line -x` to apply the +configuration changes. + +Redirecting HTTP Requests +========================= + +You can configure Traffic Server to redirect HTTP requests without +having to contact any origin servers. For example, if you redirect all +requests for ``http://www.ultraseek.com`` to +``http://www.server1.com/products/portal/search/``, then all HTTP +requests for ``www.ultraseek.com`` go directly to +``www.server1.com/products/portal/search``. + +You can configure Traffic Server to perform permanent or temporary +redirects. **Permanent redirects** notify the browser of the URL change +(by returning the HTTP status code **``301``**) so that the browser can +update bookmarks. 
**Temporary redirects** notify the browser of the URL +change for the current request only (by returning the HTTP status code +``307``).
Add a line in :file:`ip_allow.config` for each IP address or + range of IP addresses allowed to access Traffic Server. +#. Run the command :option:`traffic_line -x` to apply the configuration + changes. + +.. _configuring-dns-server-selection-split-dns: + +Configuring DNS Server Selection (Split DNS) +============================================ + +The **Split DNS** option enables you to configure Traffic Server to use +multiple DNS servers, as dictated by your security requirements. For +example, you might configure Traffic Server to use one set of DNS +servers to resolve hostnames on your internal network, while allowing +DNS servers outside the firewall to resolve hosts on the Internet. This +maintains the security of your intranet, while continuing to provide +direct access to sites outside your organization. + +To configure Split DNS, you must do the following: + +- Specify the rules for performing DNS server selection based on the + destination domain, the destination host, or a URL regular + expression. +- Enable the **Split DNS** option. + +To do this, we + +#. Add rules to :file:`splitdns.config`. +#. In :file:`records.config` set the variable + :ts:cv:`proxy.config.dns.splitDNS.enabled` to ``1`` to enable split DNS. +#. Run the command :option:`traffic_line -x` to apply the configuration + changes. + +.. _configuring-ssl-termination: + +Using SSL Termination +===================== + +The Traffic Server **SSL termination** option enables you to secure +connections in reverse proxy mode between a client and a Traffic Server +and/or Traffic Server and an origin server. + +The following sections describe how to enable and configure the SSL +termination option. 
+ +- Enable and configure SSL termination for client/Traffic Server + connections: :ref:`client-and-traffic-server-connections` +- Enable and configure SSL termination for Traffic Server/origin server + connections: :ref:`traffic-server-and-origin-server-connections` +- Enable and configure SSL termination for both client/Traffic Server + and Traffic Server/origin server connections: :ref:`client-and-traffic-server-connections` + :ref:`traffic-server-and-origin-server-connections`, respectively. + +.. _client-and-traffic-server-connections: + +Client and Traffic Server Connections +------------------------------------- + +The figure below illustrates communication between a client and Traffic +Server (and between Traffic Server and an origin server) when the SSL +termination option is enabled & configured for **client/Traffic +Server connections only**. + +.. figure:: ../static/images/admin/ssl_c.jpg + :align: center + :alt: Client and Traffic Server communication using SSL termination + + Client and Traffic Server communication using SSL termination + +The figure above depicts the following: + +#. The client sends an HTTPS request for content. Traffic Server receives the request and performs the SSL 'handshake' to authenticate the client (depending on the authentication options configured) and determine the encryption method that will be used. If the client is allowed access, then Traffic Server checks its cache for the requested content. + +#. If the request is a cache hit and the content is fresh, then Traffic Server encrypts the content and sends it to the client. The client decrypts the content (using the method determined during the handshake) and displays it. + +#. If the request is a cache miss or cached content is stale, then Traffic Server communicates with the origin server via HTTP and obtains a plain text version of the content. Traffic Server saves the plain text version of the content in its cache, encrypts the content, and sends it to the client. 
The client decrypts and displays the content. + +To configure Traffic Server to use the SSL termination option for +client/Traffic Server connections, you must do the following: + +- Obtain and install an SSL server certificate from a recognized + certificate authority. The SSL server certificate contains + information that enables the client to authenticate Traffic Server + and exchange encryption keys. +- Configure SSL termination options: + + - Set the port number used for SSL communication using :ts:cv:`proxy.config.http.server_ports`. + - Edit :file:`ssl_multicert.config` to specify the filename and location of the + SSL certificates and private keys. + - (Optional) Configure the use of client certificates: Client + certificates are located on the client. If you configure Traffic + Server to require client certificates, then Traffic Server + verifies the client certificate during the SSL handshake that + authenticates the client. If you configure Traffic Server to *not* + require client certificates, then access to Traffic Server is + managed through other Traffic Server options that have been set + (such as rules in :file:`ip_allow.config`). + - (Optional) Configure the use of Certification Authorities (CAs). + CAs add security by verifying the identity of the person + requesting a certificate. + +In order to accomplish this, we + +#. Edit the following variables in the :ref:`records-config-ssl-termination` section of + :file:`records.config` + + - :ts:cv:`proxy.config.http.server_ports` + - :ts:cv:`proxy.config.ssl.client.certification_level` + - :ts:cv:`proxy.config.ssl.server.cert.path` + - :ts:cv:`proxy.config.ssl.server.private_key.path` + - :ts:cv:`proxy.config.ssl.CA.cert.path` + +#. Run the command :option:`traffic_line -L` to restart Traffic Server on the + local node or :option:`traffic_line -M` to restart Traffic Server on all + the nodes in a cluster. + + +.. This numbering is ridiculous. + +.. 
_traffic-server-and-origin-server-connections: + +Traffic Server and Origin Server Connections +-------------------------------------------- + +The figure below illustrates communication between Traffic Server and an +origin server when the SSL termination option is enabled for **Traffic +Server/origin server connections**. + +.. figure:: ../static/images/admin/ssl_os.jpg + :align: center + :alt: Traffic Server and origin server communication using SSL termination + + Traffic Server and origin server communication using SSL termination + +The figure above depicts the following: + +**Step 1:** If a client request is a cache miss or is stale, then +Traffic Server sends an HTTPS request for the content to the origin +server. The origin server receives the request and performs the SSL +handshake to authenticate Traffic Server and determine the encryption +method to be used. + +**Step 2:** If Traffic Server is allowed access, then the origin server +encrypts the content and sends it to Traffic Server, where it is +decrypted (using the method determined during the handshake). A plain +text version of the content is saved in the cache. + +**Step 3:** If SSL termination is enabled for client /Traffic Server +connections, then Traffic Server re-encrypts the content and sends it to +the client via HTTPS, where it is decrypted and displayed. If SSL +termination is not enabled for client/Traffic Server connections, then +Traffic Server sends the plain text version of the content to the client +via HTTP. + +To configure Traffic Server to use the SSL termination option for +Traffic Server and origin server connections, you must do the following: + +- Obtain and install an SSL client certificate from a recognized + certificate authority. The SSL client certificate contains + information that allows the origin server to authenticate Traffic + Server (the client certificate is optional). +- Configure SSL termination options: +- Enable the SSL termination option. 
+ + - Set the port number used for SSL communication. + - Specify the filename and location of the SSL client certificate + (if you choose to use a client certificate). + - Specify the filename and location of the Traffic Server private + key (if the private key is not located in the client certificate + file). Traffic Server uses its private key during the SSL + handshake to decrypt the session encryption keys. The private key + must be stored and protected against theft. + - Configure the use of CAs. CAs allow the Traffic Server that's + acting as a client to verify the identity of the server with which + it is communicating, thereby enabling exchange of encryption keys. + +In order to accomplish this, we: + +.. This numbering is ridiculous. I need to re-read this doc with a fresh mind and re(number|order) it. + +1. Edit the following variables in the :ref:`records-config-ssl-termination` section of + :file:`records.config`: + + - :ts:cv:`proxy.config.http.server_ports` + - :ts:cv:`proxy.config.ssl.client.verify.server` + - :ts:cv:`proxy.config.ssl.client.cert.filename` + - :ts:cv:`proxy.config.ssl.client.cert.path` + - :ts:cv:`proxy.config.ssl.client.private_key.filename` + - :ts:cv:`proxy.config.ssl.client.private_key.path` + - :ts:cv:`proxy.config.ssl.client.CA.cert.filename` + - :ts:cv:`proxy.config.ssl.client.CA.cert.path` + +2. Run the command :option:`traffic_line -L` to restart Traffic Server on the + local node or :option:`traffic_line -M` to restart Traffic Server on all + the nodes in a cluster. + diff --git a/doc/admin/session-protocol.en.rst b/doc/admin/session-protocol.en.rst new file mode 100644 index 00000000..84d7040f --- /dev/null +++ b/doc/admin/session-protocol.en.rst @@ -0,0 +1,55 @@ +.. Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. 
The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, + software distributed under the License is distributed on an + "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied. See the License for the + specific language governing permissions and limitations + under the License. + +.. _session-protocol: + +Session Protocol +**************** + +Traffic Server supports some session level protocols in place or on +top of HTTP. These can be provided by a plugin +(:ref:`see `) or be one that is supported +directly by Traffic Server. Currently the +`SPDY `_ protocol is the only one current +supported but it is planned to support HTTP 2 when that is finalized. + +Session protocols are specified by explicit names, based on the `NPN `_ names. The core supported names are + +* ``http/0.9`` +* ``http/1.0`` +* ``http/1.1`` +* ``http/2`` +* ``spdy/1`` +* ``spdy/2`` +* ``spdy/3`` +* ``spdy/3.1`` + +The ``http/2`` value is currently not functional but included for future use. ``spdy/1`` and ``spdy/2`` are obsolete but are include for completeness. + +The session protocols supported on a proxy port are a subset of these values. For convenience some psuedo-values are defined in terms of these fundamental protocols. + +* ``http`` means ``http/0.9``, ``http/1.0``, and ``http/1.1`` +* ``spdy`` means ``spdy/3`` and ``spdy/3.1``. +* ``http2`` means ``http/2`` + +Each proxy port can be +configured in :ts:cv:`proxy.config.http.server_ports` to support a subset of +these session protocols. For TLS enabled connections this +configuration controls which protocols are offered by NPN. For non-TLS +proxy ports protocol sniffing is used to determine which protocol is +being used by the client. 
If the detected protocol is not supported +for that proxy port the connection is dropped. diff --git a/doc/admin/traffic-server-error-messages.en.rst b/doc/admin/traffic-server-error-messages.en.rst new file mode 100644 index 00000000..01586c25 --- /dev/null +++ b/doc/admin/traffic-server-error-messages.en.rst @@ -0,0 +1,373 @@ +.. _traffic-server-error-messages: + +Error Messages +************** + +.. Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, + software distributed under the License is distributed on an + "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied. See the License for the + specific language governing permissions and limitations + under the License. + +.. toctree:: + :maxdepth: 2 + +Traffic Server Error Messages +============================= + +The following table lists messages that can appear in system log files. +This list is not exhaustive; it simply describes common warning messages +that can occur and which might require your attention. + +Traffic Server Process Fatal +============================ + +``Accept port is not between 1 and 65535. Please check configuration`` + The port specified in the :file:`records.config` file that accepts + incoming HTTP requests is not valid. + +``Self loop is detected in parent proxy configuration`` + The name and port of the parent proxy match that of Traffic Server. + This creates a loop when Traffic Server attempts to send the request + to the parent proxy. 
+ +Traffic Server Warnings +----------------------- + +`` error: error_number`` + Generic logging error. + +``Bad cluster major version range for node connect failed`` + Incompatible software versions causing a problem. + +``Connect by disallowed client , closing`` + The specified client is not allowed to connect to Traffic Server; + the client IP address is not listed in the ``ip_allow.config`` file. + +``Could not rename log to `` + System error when renaming log file during roll. + +``Did of backup; still to do `` + Congestion is approaching. + +``Different clustering minor versions for node continuing`` + Incompatible software versions are causing a problem. + +``Log format symbol not found`` + Custom log format references a field symbol that does not exist. + Refer to :ref:`event-logging-formats`. + +``Missing field for field marker`` + Error reading a log buffer. + +``Unable to open log file , errno=`` + Cannot open the log file. + +``Error accessing disk `` + Traffic Server might have a cache read problem. You might need to + replace the disk. + +``Too many errors accessing disk : declaring disk bad`` + Traffic Server is not using the cache disk because it encountered + too many errors. The disk might be corrupt and might have to be + replaced. + +``No cache disks specified in storage.config file: cache disabled`` + The Traffic Server ``storage.config`` file does not list any cache + disks; Traffic Server is running in proxy-only mode. You must add + the disks you want to use for the cache to :file:`storage.config`. + +Traffic Server Alarm Messages +============================= + +``[Rollback::Rollback] Config file is read-only: `` + Go to the Traffic Server ``config`` directory and check the + indicated file permissions; change if necessary. + +``[Rollback::Rollback] Unable to read or write config file `` + Go to the Traffic Server ``config`` directory and make sure the + indicated file exists. Check permissions and modify if necessary. 
+ +``[Traffic Manager] Configuration File Update Failed: `` + Go to the Traffic Server ``config`` directory and check the + indicated file permissions; change if necessary. + +``[Traffic Manager] Mgmt <==>Proxy conn. closed`` + An informational message to inform you that the :program:`traffic_server` + process is down. + +``Access logging suspended - configured space allocation exhausted.`` + The space allocated to the event log files is full; you must either + increase the space or delete some log files so that access logging + to continue. To prevent this error, consider rolling log files more + frequently and enabling the autodelete feature. + +``Access logging suspended - no more space on the logging partition.`` + The entire partition containing the event logs is full; you must + delete or move some log files to enable access logging to continue. + To prevent this error, consider rolling log files more frequently + and enabling the autodelete feature. + +``Created zero length place holder for config file `` + Go to the Traffic Server ``config`` directory and check the + indicated file. If it is indeed zero in length, then use a backup + copy of the configuration file. + +``Traffic Server could not open logfile `` + Check permissions for the indicated file and the logging directory. + +``Traffic Server failed to parse line of the logging config file `` + Check your custom log configuration file; there could be syntax + errors. Refer to :ref:`custom-logging-fields` for correct custom log format fields. + +``vip_config binary is not setuid root, manager will be unable to enable virtual ip addresses`` + The :program:`traffic_manager` process is not able to set virtual IP + addresses. You must ``setuid root`` for the ``vip_config`` file in + the Traffic Server ``bin`` directory. 
+ +HTML Messages Sent to Clients +============================= + +Traffic Server returns detailed error messages to browser clients when +there are problems with the HTTP transactions requested by the browser. +These Traffic Server response messages correspond to standard HTTP +response codes, but provide more information. A list of the more +frequently-encountered HTTP response codes is provided in :ref:`standard-http-response-messages`. +You can customize the Traffic Server response messages (typically in +proxy/config/body_factory/default/, but set by +:ts:cv:`proxy.config.body_factory.template_sets_dir`). + +The following table lists the hard-coded Traffic Server HTTP messages, +with corresponding HTTP response codes and customizable files. + +``Access Denied`` + ``403`` + You are not allowed to access the document at location ``URL``. + ``access#denied`` + +``Cache Read Error`` + ``500`` + Error reading from cache; please retry request. + ``cache#read_error`` + +``Connection Timed Out`` + ``504`` + Too much time has elapsed since the server has sent data. + ``timeout#inactivity`` + +``Content Length Required`` + ``400`` + Could not process this request because ``Content-Length`` was not specified. + ``request#no_content_length`` + +``Cycle Detected`` + ``400`` + Your request is prohibited because it would cause an HTTP proxy cycle. + ``request#cycle_detected`` + +``Forbidden`` + ``403`` + ```` is not an allowed port for SSL connections (you have made a request for a secure SSL connection to a forbidden port number). + ``access#ssl_forbidden`` + +``Host Header Required`` + ``400`` + An attempt was made to transparently proxy your request, but this + attempt failed because your browser did not send an HTTP ``Host`` + header. Manually configure your browser to use + ``http://:`` as the HTTP + proxy. Alternatively, end users can upgrade to a browser that + supports the HTTP ``Host`` header field. 
+ ``interception#no_host`` + +``Host Header Required`` + ``400`` + Because your browser did not send a ``Host`` HTTP header field, the + virtual host being requested could not be determined. To access the + website correctly, you must upgrade to a browser that supports the + HTTP ``Host`` header field. + ``request#no_host`` + +``HTTP Version Not Supported`` + ``505`` + The origin server ```` is using an unsupported version + of the HTTP protocol. + ``response#bad_version`` + +``Invalid Content Length`` + ``400`` + Could not process this request because the specified ``Content-Length`` + was invalid (less than 0).. + ``request#invalid_content_length`` + +``Invalid HTTP Request`` + ``400`` + Could not process this ```` HTTP method request for ``URL``. + ``request#syntax_error`` + +``Invalid HTTP Response`` + ``502`` + The host ```` did not return the document ``URL`` correctly. + ``response#bad_response`` + +``Malformed Server Response`` + ``502`` + The host ```` did not return the document ``URL`` correctly. + ``response#bad_response`` + +``Malformed Server Response Status`` + ``502`` + The host ```` did not return the document ``URL`` correctly. + ``response#bad_response`` + +``Maximum Transaction Time exceeded`` + ``504`` + Too much time has elapsed while transmitting document ``URL``. + ``timeout#activity`` + +``No Response Header From Server`` + ``502`` + The host ```` did not return the document ``URL`` correctly. + ``response#bad_response`` + +``Not Cached`` + ``504`` + This document was not available in the cache, and you (the client) + only accept cached copies. + ``cache#not_in_cache`` + +``Not Found on Accelerator`` + ``404`` + The request for ``URL`` on host ```` was not found. + Check the location and try again. + ``urlrouting#no_mapping`` + +``NULL`` + ``502`` + The host ```` did not return the document ``URL`` correctly. + ``response#bad_response`` + +``Proxy Authentication Required`` + ``407`` + Please log in with username and password. 
+ ``access#proxy_auth_required`` + +``Server Hangup`` + ``502`` + The server ```` closed the connection before the transaction was completed. + ``connect#hangup`` + +``Temporarily Moved`` + ``302`` + The document you requested, ``URL``, has moved to a new location. The new location is ````. + ``redirect#moved_temporarily`` + +``Transcoding Not Available`` + ``406`` + Unable to provide the document ``URL`` in the format requested by your browser. + ``transcoding#unsupported`` + +``Tunnel Connection Failed`` + ``502`` + Could not connect to the server ````. + ``connect#failed_connect`` + +``Unknown Error`` + ``502`` + The host ```` did not return the document ``URL`` correctly. + ``response#bad_response`` + +``Unknown Host`` + ``500`` + Unable to locate the server named ````; the server does + not have a DNS entry. Perhaps there is a misspelling in the server + name or the server no longer exists; double-check the name and try + again. + ``connect#dns_failed`` + +``Unsupported URL Scheme`` + ``400`` + Cannot perform your request for the document ``URL`` because the + protocol scheme is unknown. + ``request#scheme_unsupported`` + + +.. _standard-http-response-messages: + +Standard HTTP Response Messages +------------------------------- + +The following standard HTTP response messages are provided for your +information. 
+ +``200`` + OK + +``202`` + Accepted + +``204`` + No Content + +``206`` + Partial Content + +``300`` + Multiple Choices + +``301`` + Moved Permanently + +``302`` + Found + +``303`` + See Other + +``304`` + Not Modified + +``400`` + Bad Request + +``401`` + Unauthorized; retry + +``403`` + Forbidden + +``404`` + Not Found + +``405`` + Method Not Allowed + +``406`` + Not acceptable + +``408`` + Request Timeout + +``500`` + Internal server error + +``501`` + Not Implemented + +``502`` + Bad Gateway + +``504`` + Gateway Timeout + diff --git a/doc/admin/transparent-proxy.en.rst b/doc/admin/transparent-proxy.en.rst new file mode 100644 index 00000000..2a2eb5c5 --- /dev/null +++ b/doc/admin/transparent-proxy.en.rst @@ -0,0 +1,113 @@ +.. _transparent-proxy: + +Transparent Proxying +******************** + +.. Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, + software distributed under the License is distributed on an + "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied. See the License for the + specific language governing permissions and limitations + under the License. + + +.. toctree:: + :maxdepth: 2 + + transparent-proxy/build.en + transparent-proxy/bridge.en + transparent-proxy/router-inline.en + transparent-proxy/wccp-configuration.en + +Transparent Proxying is the ability of a proxy (such as ATS) to +intercept connections between clients and servers without being visible. 
+ +The general network structure that will be used in this documentation +looks like this -- + +.. figure:: ../static/images/admin/ats-basic-traffic.png + :align: center + :alt: ATS basic traffic flow of Transparent Proxy + + ATS basic traffic flow of Transparent Proxy + +There must be a gateway device through which passes all network traffic +from the client to the Internet (or external cloud). The gateway is +responsible for effectively splicing ATS in to selected streams of that +traffic. Each traffic stream is split in to two, with ATS terminating +both sides. That is, stream green-1, red-2, is split in to the green +connection and the red connection. Note that ATS may or may not be on +the gateway system, the redirected traffic can flow over other network +infrastructure. + +Because ATS uses two connections transparency can be set independently +on the client and origin server (Internet / external cloud) side. We +will define what is generally called "transparent proxy" as two aspects, +*inbound transparency* and *outbound transparency*. + +Inbound transparency is a proxy that is transparent to connections that +are inbound to the proxy, i.e. a connection initiated by a client which +connects to the proxy (green-1). Similarly, outbound transparency is a +proxy that is transparent to connections that are outbound from the +proxy, i.e. a connection initiated by the proxy to an origin server +(red-2). + +In most treatments these two types of transparency are treated as +unitarily but that is not required. This implementation supports +transparency independently on the two (client, origin server) sides +(`use cases `_. + +It is critical to note that any transparency requires specialized +routing and cannot be done solely by configuring ATS. 
ATS transparency +also requires support from the Linux kernel and therefore currently only +works on sufficiently recent Linux kernels that support the following +features -- + +- TPROXY +- POSIX capabilities + +In addition the specialized routing will require using ``iptables`` and +in some cases ``ebtables``. + +Standard build procedures should work for transparency support but if +not consult these :ref:`more detailed instructions ` + +Transparency is configured per server port not globally. This is done +via the configuration values :ts:cv:`proxy.config.http.server_ports`. +In addition, :ts:cv:`proxy.config.reverse_proxy.enabled` must be enabled if the +client side is transparent. That should be fixed in a future patch. + +In the first case use the attribute character (replacing the default +'X') + +**Attribute** **Transparency Style** **Reverse Proxy** + +``=`` + Full transparency: either + +``>`` + Inbound (client) transparency: enabled + +``<`` + Outbound (origin server) transparency: either + +In the outbound transparent case clients must connect directly to ATS +either through an explicit proxy mechanism or by advertising the IP +address of the ATS server via DNS as the origin server address. + +Some tested scenarios -- + +- :doc:`transparent-proxy/bridge.en` +- :doc:`transparent-proxy/router-inline.en` +- :doc:`transparent-proxy/wccp-configuration.en` + diff --git a/doc/admin/transparent-proxy/bridge.en.rst b/doc/admin/transparent-proxy/bridge.en.rst new file mode 100644 index 00000000..388a9c8f --- /dev/null +++ b/doc/admin/transparent-proxy/bridge.en.rst @@ -0,0 +1,177 @@ +Inline on a Linux Bridge +************************ + +.. Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. 
The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, + software distributed under the License is distributed on an + "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied. See the License for the + specific language governing permissions and limitations + under the License. + + + +A Linux can be configured to operate in `bridge mode `_. +Two or more physical interfaces are assigned to the bridge. A single IP +address is shared across the interfaces. By default any packet that +arrives on one interface is immediately routed out another bridge +interface. + +Linux packages required: + +- bridge-utils +- ebtables + +In our example of setting up bridge mode we will use a local address of +192.168.1.11/24 and interfaces ``eth0`` and ``eth1`` as the bridge +interfaces (more detailed documentation is available +`here `_). +You may omit the '#' character and everything after it. :: + + brctl addbr br0 # create bridge device + brctl stp br0 off # Disable spanning tree protocol + brctl addif br0 eth0 # Add eth0 to bridge + brctl addif br0 eth1 # Add eth1 to bridge + + ifconfig eth0 0 0.0.0.0 # Get rid of interface IP addresses + ifconfig eth1 0 0.0.0.0 # ditto # Set the bridge IP address and enable it + ifconfig br0 192.168.1.11 netmask 255.255.255.0 up + +If you have not already done so, remember to add a default route, such +as this one for a gateway of 192.168.1.1. :: + + ip route add default via 192.168.1.1 + +At this point it is a good idea to test connectivity to verify the basic +bridge is functional. + +Once the bridge is verified to work, this is the basic traffic pattern +of interest. + +.. 
figure:: ../../static/images/admin/ats-traffic-bridge.png + :align: center + :alt: Picture of traffic flow through a bridge with ATS + + Picture of traffic flow through a bridge with ATS + +The green arrows are packets originating from the client and the red +arrows are packets originating from the origin server. All traffic not +directed to the local address will pass through the bridge. We need to +break into some of the traffic and subject it to routing so that it can +be routed to ATS. This requires ``ebtables``. The flows we want to +intercept are green 1 (from client to bridge) and red 1 (origin server +to bridge). + +In this example we will intercept port 80 (HTTP) traffic. We will use +the ``BROUTING`` chain because it is traversed only for packets that +originated externally and arrived on a (forwarding enabled) interface. +Although it looks like this will intercept all port 80 traffic it will +only affect the two flows described above. ``-j redirect`` marks the +packet as being diverted to the bridge and not forwarded, and the +``DROP`` target puts the packets in the normal ``iptables`` routing so +that we can use standard device tests on them [1]_. Although this +example handles only port 80, other ports are the same except for the +port value. Note also the port here is the port from the point of view +of the clients and origin servers, not the Traffic Server server port. 
:: + + ebtables -t broute -F # Flush the table + # inbound traffic + ebtables -t broute -A BROUTING -p IPv4 --ip-proto tcp --ip-dport 80 \ + -j redirect --redirect-target DROP + # returning outbound traffic + ebtables -t broute -A BROUTING -p IPv4 --ip-proto tcp --ip-sport 80 \ + -j redirect --redirect-target DROP + +Traffic Server operates at layer 3 so we need to use ``iptables`` to +handle IP packets appropriately.:: + + iptables -t mangle -A PREROUTING -i eth1 -p tcp -m tcp --dport 80 \ + -j TPROXY --on-ip 0.0.0.0 --on-port 8080 --tproxy-mark 1/1 + iptables -t mangle -A PREROUTING -i eth0 -p tcp -m tcp --sport 80 \ + -j MARK --set-mark 1/1 + +At this point the directionality of the interfaces matters. For the +example ``eth1`` is the inbound (client side) interface, while ``eth0`` +is the outbound (origin server side) interface. We mark both flows of +packets so that we can use policy routing on them. For inbound packets +we need to use ``TPROXY`` to force acceptance of packets to foreign IP +addresses. For returning outbound packets there will be a socket open +bound to the foreign address, we need only force it to be delivered +locally. The value for ``--on-ip`` is 0 because the target port is +listening and not bound to a specific address. The value for +``--on-port`` must match the Traffic Server server port. Otherwise its +value is arbitrary. ``--dport`` and ``--sport`` specify the port from +the point of view of the clients and origin servers. + +Once the flows are marked we can force them to be delivered locally via +the loopback interface via a policy routing table.:: + + ip rule add fwmark 1/1 table 1 + ip route add local 0.0.0.0/0 dev lo table 1 + +The marking used is arbitrary but it must be consistent between +``iptables`` and the routing rule. The table number must be in the range +1..253. 
+ +To configure Traffic Server set the following values in +:file:`records.config` + +- :ts:cv:`proxy.config.http.server_ports` *value from* ``--on-port`` (see below) + +- :ts:cv:`proxy.config.reverse_proxy.enabled` ``1`` + +- :ts:cv:`proxy.config.url_remap.remap_required` ``0`` + +You may also need to set :ts:cv:`proxy.config.cluster.ethernet_interface` to +"br0" (the name of the bridge interface from the ``_). + +Additional troubleshooting +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +* Check to make sure that ``iptables`` is not filtering (blocking) + incoming HTTP connections. + + It is frequently the case that the default tables prevent incoming HTTP. You can clear all filters with the + commands:: + + iptables -t filter --flush FORWARD + iptables -t filter --flush INPUT + + That is a bit drastic and should only be used for testing / debugging. A + live system will likely need some filters in place but that is beyond + the scope of this document. If this fixes the problem, then your filter + set is too restrictive. + + Note that this problem will prevent the basic bridge (without ATS) from + allowing HTTP traffic through. + +* Verify that IP packet forwarding is enabled. + + You can check this with:: + + cat /proc/sys/net/ipv4/ip_forward + + The output should be a non-zero value (usually '1'). If it is zero, you + can set it with:: + + echo '1' > /proc/sys/net/ipv4/ip_forward + + This can setting can be persisted by putting it in ``/etc/sysctl.conf``: :: + + net/ipv4/ip_forward=1 + +.. rubric:: Footnotes + +.. [1] + The ``--redirect-target`` can be omitted, but then the ``iptables`` + rules would need to use ``--physdev`` instead of just ``-i``. The + actual packet processing is identical. diff --git a/doc/admin/transparent-proxy/build.en.rst b/doc/admin/transparent-proxy/build.en.rst new file mode 100644 index 00000000..e547a061 --- /dev/null +++ b/doc/admin/transparent-proxy/build.en.rst @@ -0,0 +1,60 @@ +.. 
_building-ats-for-transparency: + +Building ATS for transparency +***************************** + +.. Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, + software distributed under the License is distributed on an + "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied. See the License for the + specific language governing permissions and limitations + under the License. + + +In most cases, if your environment supports transparency then +``configure`` will automatically enable it. For other environments you +may need to twiddle the ``configure`` options. + +``--enable-posix-cap`` + This enables POSIX capabilities, which are required for + transparency. These are compiled in by default. To check your + system, look for the header file ``sys/capability.h`` and the system + library ``libcap``. These are in the packages ``libcap`` and + ``libcap-devel`` or ``libcap-dev`` (depending on the Distribution) + contra-respectively. + +``--enable-tproxy[=value]`` + Enable TPROXY support, which is the Linux kernel feature used for + transparency. This should be present in the base installation, there + is no package associated with it. \* ``auto`` Do automatic checks + for the the TPROXY header file (``linux/in.h``) and enable TPROXY + support if the ``IP_TRANSPARENT`` definition is present. This is the + default if this option is not specified or ``value`` is omitted. \* + ``no`` Do not check for TPROXY support, disable support for it. 
\* + ``force`` Do not check for TPROXY support, enable it using the $ats@ + built in value for ``IP_TRANSPARENT``. This is useful for systems + that have it in the kernel for but some reason do not have the + appropriate system header file. \* *number* Do not check for TPROXY + support, use *number* as the ``IP_TRANSPARENT`` value. There are, at + present, no known standard distributions of Linux that support + TPROXY but use a value different from the built in ATS default. + However, a custom built kernel may do so and in that case the + specific value can be specified. + +In the default case, ATS configuration will automatically check for +TPROXY support via the presence of the ``linux/in.h`` header file and +compile in TPROXY support if it is available. If that fails, you may be +able to recover by using one of the options above. Note that +transparency may be built in by default but it is not active unless +explicitly enabled in the ATS configuration files. + diff --git a/doc/admin/transparent-proxy/router-inline.en.rst b/doc/admin/transparent-proxy/router-inline.en.rst new file mode 100644 index 00000000..f7047b9a --- /dev/null +++ b/doc/admin/transparent-proxy/router-inline.en.rst @@ -0,0 +1,91 @@ +Inline on a Linux router +************************ + +.. Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, + software distributed under the License is distributed on an + "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied. 
See the License for the + specific language governing permissions and limitations + under the License. + +The routed set up presumes the set of clients are on distinct networks +behind a single physical interface. For the purposes of this example +will we presume + +- The clients are on network 172.28.56.0/24 +- The router connects the networks 172.28.56.0/24 and 192.168.1.0/24 +- Interface ``eth0`` is on the network 192.168.1.0/24 +- Interface ``eth1`` is on the network 172.28.56.0/24 +- The router is already configured to route traffic correctly for the + clients. + +In this example we will intercept port 80 (HTTP) traffic that traverses +the router. The first step is to use ``iptables`` to handle IP packets +appropriately. + +:: + + # reflow client web traffic to TPROXY + iptables -t mangle -A PREROUTING -i eth1 -p tcp -m tcp --dport 80 -j TPROXY \ + --on-ip 0.0.0.0 --on-port 8080 --tproxy-mark 1/1 + # Let locally directed traffic pass through. + iptables -t mangle -A PREROUTING -i eth0 --source 192.168.56.0/24 -j ACCEPT + iptables -t mangle -A PREROUTING -i eth0 --destination 192.168.56.0/24 -j ACCEPT + # Mark presumed return web traffic + iptables -t mangle -A PREROUTING -i eth0 -p tcp -m tcp --sport 80 -j MARK --set-mark 1/1 + +We mark packets so that we can use policy routing on them. For inbound +packets we use ``TPROXY`` to make it possible to accept packets sent to +foreign IP addresses. For returning outbound packets there will be a +socket open bound to the foreign address, we need only force it to be +delivered locally. The value for ``--on-ip`` is 0 because the target +port is listening and not bound to a specific address. The value for +``--on-port`` must match the Traffic Server server port. Otherwise its +value is arbitrary. ``--dport`` and ``--sport`` specify the port from +the point of view of the clients and origin servers. 
The middle two +lines exempt local web traffic from being marked for Traffic Server -- +these rules can be tightened or loosened as needed. They server by +matching traffic and exiting the ``iptables`` processing via ``ACCEPT`` +before the last line is checked. + +Once the flows are marked we can force them to be delivered locally via +the loopback interface via a policy routing table. + +:: + + ip rule add fwmark 1/1 table 1 + ip route add local 0.0.0.0/0 dev lo table 1 + +The marking used is arbitrary but it must be consistent between +``iptables`` and the routing rule. The table number must be in the range +1..253. + +To configure Traffic Server set the following values in +:file:`records.config` + +``proxy.config.http.server_port`` + ``STRING`` + Default: *value from* ``--on-port`` + +``proxy.config.http.server_port_attr`` + ``STRING`` + Default: ``=`` + +``proxy.config.reverse_proxy.enabled`` + ``INT`` + Default: ``1`` + +``proxy.config.url_remap.remap_required`` + ``INT`` + Default: ``0`` + diff --git a/doc/admin/transparent-proxy/wccp-configuration.en.rst b/doc/admin/transparent-proxy/wccp-configuration.en.rst new file mode 100644 index 00000000..25888228 --- /dev/null +++ b/doc/admin/transparent-proxy/wccp-configuration.en.rst @@ -0,0 +1,155 @@ +WCCP Configuration +****************** + +.. Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, + software distributed under the License is distributed on an + "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied. 
See the License for the + specific language governing permissions and limitations + under the License. + + +.. toctree:: + :maxdepth: 2 + +`WCCP `_ +is de-facto semi-standard used by routers to redirect network traffic to +caches. It is available on most Cisco™ routers although it does not +appear to be officially supported by Cisco. The primary benefits of WCCP +are + +- If already have a router that supports WCCP inline you do not + have to change your network topology. +- WCCP fails open so that if the Traffic Server machine fails, it is + bypassed and users continue to have Internet access. + +Use of WCCP only makes sense for client side transparency [1]_ +because if the clients are explicitly proxied by Traffic Server there's +no benefit to WCCP fail open, as the clients will continue to directly +access the unresponsive Traffic Server host. It would be better to +adjust the routing tables on the router for explicit proxying. + +Because the router serves as the inline network element, Traffic Server +must run on a separate host. This host can be located anywhere as long +as Traffic Server is either on the same network segment or a GRE tunnel +can be maintained between the Traffic Server host and the router. + +|important| This document presumes that the router is already properly +configured to handle traffic between the clients and the origin servers. +If you are not certain, verify it before attempting to configure Traffic +Server with WCCP. This is also a good state to which to revert should +the configuration go badly. + +Configuration overview +====================== + +Setting WCCP is a three step process, first configuring the router, the +Traffic Server host, and Traffic Server. + +|image1| The router will **not** respond to WCCP protocol packets unless +explicitly configured to do so. Via WCCP, the router can be made to +perform packet interception and redirection needed by Traffic Server +transparency. 
The WCCP protocol in effect acts as means of controlling a +rough form of policy routing with positive heartbeat cutoff. + +The Traffic Server host system must also be configured using +``iptables`` to accept connections on foreign addresses. This is done +roughly the same way as the standard transparency use. + +Finally Traffic Server itself must be configured for transparency and +use of WCCP. The former is again very similar to the standard use, while +WCCP configuration is specific to WCCP and uses a separate configuration +file, referred to by the :file:`records.config` file. + +The primary concern for configuration in which of three basic topologies +are to be used. + +- Dedicated -- Traffic Server traffic goes over an interface that is + not used for client nor origin server traffic. +- Shared -- Traffic Server traffic shares an interface with client or + server traffic. +- Inside Shared -- Traffic Server and client traffic share an + interface. +- `Outside Shared `_ -- Traffic Server and + origin server traffic share an interface. + +In general the dedicated topology is preferred. However, if the router +has only two interfaces one of the shared topologies will be +required [2]_ Click the links above for more detailed configuration +information on a specific topology. + +Shared interface issues +----------------------- + +A shared interface topology has additional issues compared to a +dedicated topology that must be handled. Such a topology is required if +the router has only two interfaces, and because of these additional +issues it is normally only used in such cases, although nothing prevents +it use even if the router has three or more interfaces. + +The basic concept for a shared interface is to use a tunnel to simulate +the dedicated interface case. This enables the packets to be +distinguished at layer 3. 
For this reason, layer 2 redirection cannot be +used because the WCCP configuration cannot distinguish between packets +returning from the origin server and packets returning from Traffic +Server as they are distinguished only by layer 2 addressing [3]_. +Fortunately the GRE tunnel used for packet forwarding and return can be +used as the simulated interface for Traffic Server. + +Frequently encountered problems +------------------------------- + +MTU and fragmentation +~~~~~~~~~~~~~~~~~~~~~ + +In most cases the basic configuration using a tunnel in any topology can +fail due to issues with fragmentation. The socket logic is unable to +know that its packets will eventually be put in to a tunnel which will +by its nature have a smaller +`MTU `_ than the +physical interface which it uses. This can lead to pathological behavior +or outright failure as the packets sent are just a little too big. It is +not possible to solve easily by changing the MTU on the physical +interface because the tunnel interface uses that to compute its own MTU. + +References +========== + +- `WCCP Router Configuration Commands - IOS + 12.2 `_ + + + + + + + + +.. [1] + Server side transparency should also be used, but it is not as + significant. In its absence, however, origin servers may see the + source address of connections suddenly change from the Traffic Server + address to client addresses, which could cause problems. Further, the + primary reason for not having server side transparency is to hide + client addresses which is defeated if the Traffic Server host fails. + +.. [2] + If your router has only one interface, it's hardly a *router*. + +.. [3] + This is not fundamentally impossible, as the packets are distinct in + layer + +.. |important| image:: ../../static/images/docbook/important.png +.. 
|image1| image:: ../../static/images/docbook/important.png + diff --git a/doc/admin/working-log-files.en.rst b/doc/admin/working-log-files.en.rst new file mode 100644 index 00000000..e4458d8f --- /dev/null +++ b/doc/admin/working-log-files.en.rst @@ -0,0 +1,1157 @@ +.. _working-with-log-files: + +Working with Log Files +********************** + +.. Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, + software distributed under the License is distributed on an + "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied. See the License for the + specific language governing permissions and limitations + under the License. + + +Traffic Server generates log files that contain information about every +request it receives and every error it detects. This chapter will examine the +various log features, the configuration formats and also examine the various +pre-defined log formats that are available. + +.. _understanding-traffic-server-log-files: + +Understanding Traffic Server Log Files +====================================== + +Traffic Server records information about every transaction (or request) +it processes and every error it detects in log files. Traffic Server +keeps three types of log files: + +- **Error log files** record information about why a particular + transaction was in error. + +- **Event log files** (also called **access log files**) record + information about the state of each transaction Traffic Server + processes. 
+ +- **System log files** record system information, including messages + about the state of Traffic Server and errors/warnings it produces. + This kind of information might include a note that event log files + were rolled, a warning that cluster communication timed out, or an + error indicating that Traffic Server was restarted. + + All system information messages are logged with the system-wide + logging facility :manpage:`syslog` under the daemon facility. The + :manpage:`syslog.conf(5)` configuration file (stored in the ``/etc`` directory) + specifies where these messages are logged. A typical location is + ``/var/log/messages`` (Linux). + + The :manpage:`syslog(8)` process works on a system-wide basis, so it serves as + the single repository for messages from all Traffic Server processes + (including :program:`traffic_server`, :program:`traffic_manager`, and + :program:`traffic_cop`). + + System information logs observe a static format. Each log entry in + the log contains information about the date and time the error was + logged, the hostname of the Traffic Server that reported the error, + and a description of the error or warning. + + Refer to :ref:`traffic-server-error-messages` for a list of the + messages logged by Traffic Server. + +By default, Traffic Server creates both error and event log files and +records system information in system log files. You can disable event +logging and/or error logging by setting the configuration variable +:ts:cv:`proxy.config.log.logging_enabled` (in the :file:`records.config` file) +to one of the following values: + +- ``0`` to disable both event and error logging +- ``1`` to enable error logging only +- ``2`` to enable transaction logging only +- ``3`` to enable both transaction and error logging + +Understanding Event Log Files +============================= + +Event log files record information about every request that Traffic +Server processes. 
By analyzing the log files, you can determine how many +people use the Traffic Server cache, how much information each person +requested, what pages are most popular, and so on. Traffic Server +supports several standard log file formats, such as Squid and Netscape, +as well as user-defined custom formats. You can analyze the standard +format log files with off-the-shelf analysis packages. To help with log +file analysis, you can separate log files so they contain information +specific to protocol or hosts. You can also configure Traffic Server to +roll log files automatically at specific intervals during the day or +when they reach a certain size. + +The following sections describe the Traffic Server logging system +features and discuss how to: + +- **Manage your event log files** + + You can choose a central location for storing log files, set how much + disk space to use for log files, and set how and when to roll log + files. Refer to `Managing Event Log Files`_. + +- **Choose different event log file formats** + + You can choose which standard log file formats you want to use for + traffic analysis, such as Squid or Netscape. Alternatively, you can + use the Traffic Server custom format, which is XML-based and enables + you to institute more control over the type of information recorded + in log files. Refer to `Choosing Event Log File Formats`_. + +- **Roll event log files automatically** + + Configure Traffic Server to roll event log files at specific + intervals during the day or when they reach a certain size; this + enables you to identify and manipulate log files that are no longer + active. Refer to `Rolling Event Log Files`_. + +- **Separate log files according to protocols and hosts** + + Configure Traffic Server to create separate log files for different + protocols. You can also configure Traffic Server to generate separate + log files for requests served by different hosts. Refer to `Splitting Event Log Files`_. 
+ +- **Collate log files from different Traffic Server nodes** + + Designate one or more nodes on the network to serve as log collation + servers. These servers, which might be standalone or part of Traffic + Server, enable you to keep all logged information in well-defined + locations. Refer to `Collating Event Log Files`_. + +- **View statistics about the logging system** + + Traffic Server provides statistics about the logging system; you can + access these statistics via Traffic Line. Refer to `Viewing Logging Statistics`_. + +- **Interpret log file entries for the log file formats** + + Refer to `Example Event Log File Entries`_. + +Managing Event Log Files +------------------------ + +Traffic Server enables you to control where event log files are located +and how much space they can consume. Additionally you can specify how to +handle low disk space in the logging directory. + +Choosing the Logging Directory +------------------------------ + +By default, Traffic Server writes all event log files in the ``logs`` +directory located in the directory where you installed Traffic Server. +To use a different directory, refer to `Setting Log File Management Options`_. + +Controlling Logging Space +------------------------- + +Traffic Server enables you to control the amount of disk space that the +logging directory can consume. This allows the system to operate +smoothly within a specified space window for a long period of time. +After you establish a space limit, Traffic Server continues to monitor +the space in the logging directory. When the free space dwindles to the +headroom limit (see `Setting Log File Management Options`_), it enters +a low space state and takes the following actions: + +- If the autodelete option (discussed in `Rolling Event Log Files`_) + is *enabled*, then Traffic Server + identifies previously-rolled log files (i.e., log files with the + ``.old`` extension). 
It starts deleting files one by one, beginning + with the oldest file, until it emerges from the low state. Traffic + Server logs a record of all deleted files in the system error log. + +- If the autodelete option is *disabled* or there are not enough old + log files to delete for the system to emerge from its low space + state, then Traffic Server issues a warning and continues logging + until space is exhausted. When available space is consumed, event + logging stops. Traffic Server resumes event logging when enough space + becomes available for it to exit the low space state. To make space + available, either explicitly increase the logging space limit or + remove files from the logging directory manually. + +You can run a :manpage:`cron(8)` script in conjunction with Traffic Server to +automatically remove old log files from the logging directory before +Traffic Server enters the low space state. Relocate the old log files to +a temporary partition, where you can run a variety of log analysis +scripts. Following analysis, either compress the logs and move to an +archive location, or simply delete them. + + +Setting Log File Management Options +----------------------------------- + +To set log management options, follow the steps below: + +1. In the :file:`records.config` file, edit the following variables + + - :ts:cv:`proxy.config.log.logfile_dir` + - :ts:cv:`proxy.config.log.max_space_mb_for_logs` + - :ts:cv:`proxy.config.log.max_space_mb_headroom` + +2. Run the command :option:`traffic_line -x` to apply the configuration + changes. + +Choosing Event Log File Formats +------------------------------- + +Traffic Server supports the following log file formats: + +- Standard formats, such as Squid or Netscape; refer to `Using Standard Formats`_. +- The Traffic Server custom format; refer to `Using the Custom Format`_. 
+ +In addition to the standard and custom log file format, you can choose +whether to save log files in binary or ASCII; refer to `Choosing Binary or ASCII`_. + +Event log files consume substantial disk space. Creating log entries in +multiple formats at the same time can consume disk resources very +quickly and adversely impact Traffic Server performance. + +Using Standard Formats +~~~~~~~~~~~~~~~~~~~~~~ + +The standard log formats include Squid, Netscape Common, Netscape +extended, and Netscape Extended-2. The standard log file formats can be +analyzed with a wide variety of off-the-shelf log-analysis packages. You +should use one of the standard event log formats unless you need +information that these formats do not provide. Refer to `Using the Custom Format`_. + +Set standard log file format options by following the steps below: + +1. In the :file:`records.config` file, edit the following variables +2. Edit the following variables to use the Squid format: + + - :ts:cv:`proxy.config.log.squid_log_enabled` + - :ts:cv:`proxy.config.log.squid_log_is_ascii` + - :ts:cv:`proxy.config.log.squid_log_name` + - :ts:cv:`proxy.config.log.squid_log_header` + +3. To use the Netscape Common format, edit the following variables: + + - :ts:cv:`proxy.config.log.common_log_enabled` + - :ts:cv:`proxy.config.log.common_log_is_ascii` + - :ts:cv:`proxy.config.log.common_log_name` + - :ts:cv:`proxy.config.log.common_log_header` + +4. To use the Netscape Extended format, edit the following variables: + + - :ts:cv:`proxy.config.log.extended_log_enabled` + - :ts:cv:`proxy.config.log.extended_log_is_ascii` + - :ts:cv:`proxy.config.log.extended_log_name` + - :ts:cv:`proxy.config.log.extended_log_header` + +5. To use the Netscape Extended-2 format, edit the following variables: + + - :ts:cv:`proxy.config.log.extended2_log_enabled` + - :ts:cv:`proxy.config.log.extended2_log_is_ascii` + - :ts:cv:`proxy.config.log.extended2_log_name` + - :ts:cv:`proxy.config.log.extended2_log_header` + +6. 
Run the command :option:`traffic_line -x` to apply the configuration + changes. + + +.. _using-custom-log-formats: + +Using the Custom Format +~~~~~~~~~~~~~~~~~~~~~~~ + +The XML-based custom log format is more flexible then the standard log +file formats and gives you more control over the type of information +recorded in log files. You should create a custom log format if you need +data for analysis that's not available in the standard formats. You can +decide what information to record for each Traffic Server transaction +and create filters that specify which transactions to log. + +The heart of the XML-based custom logging feature is the XML-based +logging configuration file (:file:`logs_xml.config`) that enables you to +create very modular descriptions of logging objects. The +:file:`logs_xml.config` file uses three types of objects to create custom +log files, as detailed below. To generate a custom log format, you must +specify at least one ``LogObject`` definition (one log file is produced +for each ``LogObject`` definition). + +- The ``LogFormat`` object defines the content of the log file + using printf-style format strings. +- The ``LogFilter`` object defines a filter so that you include or + exclude certain information from the log file. +- The ``LogObject`` object specifies all the information needed to + produce a log file. + + - The name of the log file. (required) + - The format to be used (required). This can be a standard format + (Squid or Netscape) or + - a previously-defined custom format (i.e., a previously-defined + ``LogFormat`` object). + - The file mode: ``ASCII``, ``Binary``, or ``ASCII_PIPE``. The + default is ``ASCII``. + The ``ASCII_PIPE`` mode writes log entries to a UNIX-named pipe + (a buffer in memory); other processes can then read the data using + standard I/O functions. The advantage of this option is that + Traffic Server does not have to write to disk, which frees disk + space and bandwidth for other tasks. 
When the buffer is full, + Traffic Server drops log entries and issues an error message + indicating how many entries were dropped. Because Traffic Server + only writes complete log entries to the pipe, only full records + are dropped. + - Any filters you want to use (i.e., previously-defined + ``LogFilter`` objects). + - The collation servers that are to receive the log files. + - The protocols you want to log. If the protocols tag is used, then + Traffic Server will only log transactions from the protocols + listed; otherwise, all transactions for all protocols are logged. + - The origin servers you want to log. If the ``servers`` tag is + used, then Traffic Server will only log transactions for the + origin servers listed; otherwise, transactions for all origin + servers are logged. + - The header text you want the log files to contain. The header text + appears at the beginning of the log file, just before the first + record. + - The log file rolling options. + +In order to accomplish this, we + +1. edit the following variables in the + :file:`records.config` file: +2. :ts:cv:`proxy.config.log.custom_logs_enabled` +3. In the + :file:`logs_xml.config` file +4. Add :ref:`LogFormat`, :ref:`LogFilters`, and :ref:`LogObject` + specifications to the configuration file. +5. Save and close the :file:`logs_xml.config` file. +6. Run the command :option:`traffic_line -x` to apply your configuration + changes. + +Creating Summary Log Files +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Traffic Server performs several hundred operations per second; +therefore, event log files can quickly grow to large sizes. Using +SQL-like aggregate operators, you can configure Traffic Server to create +summary log files that summarize a set of log entries over a specified +period of time. This can significantly reduce the size of the log files +generated. 
+ +To generate a summary log file, create a +:ref:`LogFormat` object in the XML-based logging configuration file +(:file:`logs_xml.config`) using +the SQL-like aggregate operators below. You can apply each of these +operators to specific fields, over a specified interval. + +- ``COUNT`` +- ``SUM`` +- ``AVERAGE`` +- ``FIRST`` +- ``LAST`` + +To create a summary log file format, we + +1. Define the format of the log file in + :file:`logs_xml.config` as + follows: + + .. code-block:: xml + + + + : %"/> + + + + where ``operator`` is one of the five aggregate operators + (``COUNT``, ``SUM``, ``AVERAGE``, ``FIRST``, ``LAST``), ``field`` + is the logging field you want to aggregate, and ``n`` is the + interval (in seconds) between summary log entries. You can specify + more than one ``operator`` in the format line. For more + information, refer to :file:`logs_xml.config`. + +2. Run the command :option:`traffic_line -x` to apply configuration changes . + +The following example format generates one entry every 10 seconds. Each +entry contains the timestamp of the last entry of the interval, a count +of the number of entries seen within that 10-second interval, and the +sum of all bytes sent to the client: :: + + + + : % : %"/> + + + +.. important:: + + You cannot create a format specification that contains + both aggregate operators and regular fields. For example, the following + specification would be **invalid**: :: + + : % : % : %"/> + +Choosing Binary or ASCII +~~~~~~~~~~~~~~~~~~~~~~~~ + +You can configure Traffic Server to create event log files in either of +the following: + +- **ASCII** + + These files are human-readable and can be processed using standard, + off-the-shelf log analysis tools. However, Traffic Server must + perform additional processing to create the files in ASCII, which + mildly impacts system overhead. ASCII files also tend to be larger + than the equivalent binary files. By default, ASCII log files have a + ``.log`` filename extension. 
+ +- **Binary** + + These files generate lower system overhead and generally occupy less + space on the disk than ASCII files (depending on the type of + information being logged). However, you must use a converter + application before you can read or analyze binary files via standard + tools. By default, binary log files use a ``.blog`` filename + extension. + +While binary log files typically require less disk space, there are +exceptions. + +For example: the value ``0`` (zero) requires only one byte to store in +ASCII, but requires four bytes when stored as a binary integer. +Conversely: if you define a custom format that logs IP addresses, then a +binary log file would only require four bytes of storage per 32-bit +address. However, the same IP address stored in dot notation would +require around 15 characters (bytes) in an ASCII log file. Therefore, +it's wise to consider the type of data that will be logged before you +select ASCII or binary for your log files. For example, you might try +logging for one day using ASCII and then another day using binary. If +the number of requests is roughly the same for both days, then you can +calculate a rough metric that compares the two formats. + +For standard log formats, select Binary or ASCII (refer to `Setting +Standard Log File Format Options`). For the custom log +format, specify ASCII or Binary mode in the +:ref:`LogObject` +(refer to :ref:`Using the Custom Format `). In addition +to the ASCII and binary options, you can also write custom log entries +to a UNIX-named pipe (i.e., a buffer in memory). Other processes can +then read the data using standard I/O functions. The advantage of using +this option is that Traffic Server does not have to write to disk, which +frees disk space and bandwidth for other tasks. In addition, writing to +a pipe does not stop when logging space is exhausted because the pipe +does not use disk space. 
Refer to +:file:`logs_xml.config` for more information about the ``ASCII_PIPE`` option. + + +Rolling Event Log Files +----------------------- + +Traffic Server provides automatic log file rolling. This means that at +specific intervals during the day or when log files reach a certain +size, Traffic Server closes its current set of log files and opens new +log files. Depending on the amount of traffic your servers are exposed +to, you should roll log files several times a day. Rolling every six +hours is a good guideline to start with. + +Log file rolling offers the following benefits: + +- It defines an interval over which log analysis can be performed. +- It keeps any single log file from becoming too large and helps to + keep the logging system within the specified space limits. +- It provides an easy way to identify files that are no longer being + used so that an automated script can clean the logging directory and + run log analysis programs. + +Rolled Log Filename Format +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Traffic Server provides a consistent naming scheme for rolled log files +that enables you to easily identify log files. When Traffic Server rolls +a log file, it saves and closes the old file before it starts a new +file. Traffic Server renames the old file to include the following +information: + +- The format of the file (such as ``squid.log``). +- The hostname of the Traffic Server that generated the log file. +- Two timestamps separated by a hyphen (``-``). The first timestamp is + a **lower bound** for the timestamp of the first record in the log + file. The lower bound is the time when the new buffer for log records + is created. Under low load, the first timestamp in the filename can + be different from the timestamp of the first entry. Under normal + load, the first timestamp in the filename and the timestamp of the + first entry are similar. 
The second timestamp is an **upper bound** + for the timestamp of the last record in the log file (this is + normally the rolling time). +- The suffix ``.old``, which makes it easy for automated scripts to + find rolled log files. + +Timestamps have the following format: :: + + %Y%M%D.%Hh%Mm%Ss-%Y%M%D.%Hh%Mm%Ss + +The following table describes the format: + +``%Y`` + The year in four-digit format. For example: 2000. + +``%M`` + The month in two-digit format, from 01-12. For example: 07. + +``%D`` + The day in two-digit format, from 01-31. For example: 19. + +``%H`` + The hour in two-digit format, from 00-23. For example: 21. + +``%M`` + The minute in two-digit format, from 00-59. For example: 52. + +``%S`` + The second in two-digit format, from 00-59. For example: 36. + +The following is an example of a rolled log filename: :: + + squid.log.mymachine.20110912.12h00m00s-20110913.12h00m00s.old + +The logging system buffers log records before writing them to disk. When +a log file is rolled, the log buffer might be partially full. If it is, +then the first entry in the new log file will have a timestamp earlier +than the time of rolling. When the new log file is rolled, its first +timestamp will be a lower bound for the timestamp of the first entry. + +For example, suppose logs are rolled every three hours, and the first +rolled log file is: :: + + squid.log.mymachine.20110912.00h00m00s-20110912.03h00m00s.old + +If the lower bound for the first entry in the log buffer at 3:00:00 is +2:59:47, then the next log file will have the following timestamp when +rolled: :: + + squid.log.mymachine.20110912.02h59m47s-20110912.06h00m00s.old + +The contents of a log file are always between the two timestamps. Log +files do not contain overlapping entries, even if successive timestamps +appear to overlap. + +Rolling Intervals +~~~~~~~~~~~~~~~~~ + +Log files are rolled at specific intervals relative to a given hour of +the day. 
Two options control when log files are rolled: + +- The offset hour, which is an hour between 0 (midnight) and 23 +- The rolling interval + +Both the offset hour and the rolling interval determine when log file +rolling starts. Rolling occurs every rolling interval and at the offset +hour. For example, if the rolling interval is six hours and the offset +hour is 0 (midnight), then the logs will roll at midnight (00:00), +06:00, 12:00, and 18:00 each day. If the rolling interval is 12 hours +and the offset hour is 3, then logs will roll at 03:00 and 15:00 each +day. + +Setting Log File Rolling Options +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +To set log file rolling options and/or configure Traffic Server to roll +log files when they reach a certain size, follow the steps below: + +1. In the :file:`records.config` file, edit the following variables + + - :ts:cv:`proxy.config.log.rolling_enabled` + - :ts:cv:`proxy.config.log.rolling_size_mb` + - :ts:cv:`proxy.config.log.rolling_offset_hr` + - :ts:cv:`proxy.config.log.rolling_interval_sec` + +2. Run the command :option:`traffic_line -x` to apply the configuration + changes. + +You can fine-tune log file rolling settings for a custom log file in the +:ref:`LogObject` +specification in the +:file:`logs_xml.config` file. +The custom log file uses the rolling settings in its +:ref:`LogObject`, +which override the default settings you specify in Traffic Manager or +the :file:`records.config` file described above. + +Splitting Event Log Files +------------------------- + +By default, Traffic Server uses standard log formats and generates log +files that contain HTTP & ICP transactions in the same file. However, +you can enable log splitting if you prefer to log transactions for +different protocols in separate log files. + +ICP Log Splitting +~~~~~~~~~~~~~~~~~ + +When ICP log splitting is enabled, Traffic Server records ICP +transactions in a separate log file with a name that contains +``icp``. 
For example: if you enable the Squid format, then all ICP +transactions are recorded in the ``squid-icp.log`` file. When you +disable ICP log splitting, Traffic Server records all ICP transactions +in the same log file as HTTP transactions. + +HTTP Host Log Splitting +~~~~~~~~~~~~~~~~~~~~~~~ + +HTTP host log splitting enables you to record HTTP transactions for +different origin servers in separate log files. When HTTP host log +splitting is enabled, Traffic Server creates a separate log file for +each origin server that's listed in the :file:`log_hosts.config` file. +When both ICP and HTTP host log splitting are enabled, Traffic Server generates +separate log files for HTTP transactions (based on the origin server) +and places all ICP transactions in their own respective log files. For +example, if the :file:`log_hosts.config` file contains the two origin +servers ``uni.edu`` and ``company.com`` and Squid format is enabled, +then Traffic Server generates the following log files: + +``squid-uni.edu.log`` + All HTTP transactions for ``uni.edu`` + +``squid-company.com.log`` + All HTTP transactions for ``company.com`` + +``squid-icp.log`` + All ICP transactions for all hosts + +``squid.log`` + All HTTP transactions for other hosts + +If you disable ICP log splitting, then ICP transactions are placed in +the same log file as HTTP transactions. Using the hosts and log format +from the previous example, Traffic Server generates the log files below: + +``squid-uni.edu.log`` + All entries for ``uni.edu`` + +``squid-company.com.log`` + All entries for ``company.com`` + +``squid.log`` + All other entries + +Traffic Server also enables you to create XML-based :ref:`Custom Log Formats ` +that offer even greater control over log file generation. + +Setting Log Splitting Options +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +To set log splitting options, follow the steps below: + +1. 
In the :file:`records.config` file, edit the following variables + + - :ts:cv:`proxy.config.log.separate_icp_logs` + - :ts:cv:`proxy.config.log.separate_host_logs` + +2. Run the command :option:`traffic_line -x` to apply the configuration + changes. + +Editing the log_hosts.config File +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The default :file:`log_hosts.config` file is located in the Traffic Server +``config`` directory. To record HTTP transactions for different origin +servers in separate log files, you must specify the hostname of each +origin server on a separate line in the :file:`log_hosts.config` file. For +example, if you specify the keyword sports, then Traffic Server records +all HTTP transactions from ``sports.yahoo.com`` and +``www.foxsports.com`` in a log file called ``squid-sports.log`` (if the +Squid format is enabled). + +.. note:: + + If Traffic Server is clustered and you enable log file + collation, then you should use the same :file:`log_hosts.config` file on + every Traffic Server node in the cluster. + +To edit the :file:`log_hosts.config` file follow the steps below: + +1. In the :file:`log_hosts.config` file, + enter the hostname of each origin server on a separate line in the + file, e.g.: :: + + webserver1 + webserver2 + webserver3 + +2. Run the command :option:`traffic_line -x` to apply the configuration + changes. + +Collating Event Log Files +------------------------- + +You can use the Traffic Server log file collation feature to collect all +logged information in one place. Log collation enables you to analyze a +set of Traffic Server clustered nodes as a whole (rather than as +individual nodes) and to use a large disk that might only be located on +one of the nodes in the cluster. Traffic Server collates log files by +using one or more nodes as log collation servers and all remaining nodes +as log collation clients. 
When a Traffic Server node generates a buffer +of event log entries, it first determines if it is the collation server +or a collation client. The collation server node writes all log buffers +to its local disk, just as it would if log collation was not enabled. +Log collation servers can be standalone or they can be part of a node +running Traffic Server. + +The collation client nodes prepare their log buffers for transfer across +the network and send the buffers to the log collation server. When the +log collation server receives a log buffer from a client, it writes it +to its own log file as if it was generated locally. For a visual +representation of this, see the figure below. + +.. figure:: ../static/images/admin/logcolat.jpg + :align: center + :alt: Log collation + + Log collation + +If log clients cannot contact their log collation server, then they +write their log buffers to their local disks, into *orphan* log files. +Orphan log files require manual collation. + +.. note:: + + Log collation can have an impact on network performance. + Because all nodes are forwarding their log data buffers to the single + collation server, a bottleneck can occur. In addition, collated log + files contain timestamp information for each entry, but entries in the + files do not appear in strict chronological order. You may want to sort + collated log files before doing analysis. + +To configure Traffic Server to collate event log files, you must perform +the following tasks: + +- Either `Configure Traffic Server Node to Be a Collation + Server `_ or install & configure a + `Standalone Collator `_ +- `Configure Traffic Server Nodes to Be a Collation + Clients `_. +- Add an attribute to the + :ref:`LogObject` + specification in the + :file:`logs_xml.config` file + if you are using custom log file formats; refer to `Collating Custom + Event Log Files`_. 
+ +Configuring Traffic Server to Be a Collation Server +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +To configure a Traffic Server node to be a collation server, simply edit +a configuration file via the steps below. + +1. In the :file:`records.config` file, edit the following variables + + - :ts:cv:`proxy.local.log.collation_mode` (``1`` for server mode) + - :ts:cv:`proxy.config.log.collation_port` + - :ts:cv:`proxy.config.log.collation_secret` + +2. Run the command :option:`traffic_line -x` to apply the configuration + changes. + +.. note:: + + If you modify the ``collation_port`` or ``secret`` after + connections between the collation server and collation clients have been + established, then you must restart Traffic Server. + +Using a Standalone Collator +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +If you do not want the log collation server to be a Traffic Server node, +then you can install and configure a standalone collator (SAC) that will +dedicate more of its power to collecting, processing, and writing log +files. + +To install and configure a standalone collator: + +1. Configure your Traffic Server nodes as log collation clients; refer + to `Configuring Traffic Server to Be a Collation + Client <#ConfiguringTSCollationClient>`_. +2. Copy the :program:`traffic_sac` binary from the Traffic Server ``bin`` + directory and +3. Copy the ``libtsutil.so`` libraries from the Traffic Server ``lib`` + directory to the machine serving as the standalone collator. +4. Create a directory called ``config`` in the directory that contains + the :program:`traffic_sac` binary. +5. Create a directory called ``internal`` in the ``config`` directory + you created in Step 4 (above). This directory is used internally by + the standalone collator to store lock files. +6. Copy the :file:`records.config` file from a Traffic Server node + configured to be a log collation client to the ``config`` directory + you created in Step 4 on the standalone collator. 
+ The :file:`records.config` file contains the log collation secret and + the port you specified when configuring Traffic Server nodes to be + collation clients. The collation port and secret must be the same for + all collation clients and servers. +7. In the :file:`records.config` + file, edit the following variable + + - :ts:cv:`proxy.config.log.logfile_dir` + +8. Enter the following command:: + + traffic_sac -c config + +Configuring Traffic Server to Be a Collation Client +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +To configure a Traffic Server node to be a collation client, follow the +steps below. If you modify the ``collation_port`` or ``secret`` after +connections between the collation clients and the collation server have +been established, then you must restart Traffic Server. + +1. In the :file:`records.config` file, edit the following variables: + + - :ts:cv:`proxy.local.log.collation_mode`: ``2`` to configure this node as log collation client and send + standard formatted log entries to the collation server. + For XML-based formatted log entries, see :file:`logs_xml.config` + file; refer to :ref:`Using the Custom Format `. + - :ts:cv:`proxy.config.log.collation_host` + - :ts:cv:`proxy.config.log.collation_port` + - :ts:cv:`proxy.config.log.collation_secret` + - :ts:cv:`proxy.config.log.collation_host_tagged` + - :ts:cv:`proxy.config.log.max_space_mb_for_orphan_logs` + +2. Run the command :option:`traffic_line -x` to apply the configuration + changes. + +Collating Custom Event Log Files +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +If you use custom event log files, then you must edit the +:file:`logs_xml.config` file (in addition to configuring a collation server +and collation clients). + +To collate custom event log files: + +1. On each collation client, edit the :file:`logs_xml.config` file +2. 
Add the + :ref:`CollationHost ` attribute to the + :ref:`LogObject` + specification: :: + + + + + + + + where ``ipaddress`` is the hostname or IP address of the collation + server to which all log entries (for this object) are forwarded, and + ``port`` is the port number for communication between the collation + server and collation clients. + +3. Run the command :option:`traffic_line -L` to restart Traffic Server on the + local node or :option:`traffic_line -M` to restart Traffic Server on all + the nodes in a cluster. + +Viewing Logging Statistics +========================== + +Traffic Server generates logging statistics that enable you to see the +following information: + +- How many log files (formats) are currently being written. +- The current amount of space used by the logging directory, which + contains all event and error logs. +- The number of access events written to log files since Traffic Server + installation. This counter represents one entry in one file; if + multiple formats are being written, then a single event creates + multiple event log entries. +- The number of access events skipped (because they were filtered) + since Traffic Server installation. +- The number of access events written to the event error log since + Traffic Server installation. + +You can retrieve the statistics via the Traffic Line command-line +interface; refer to `Monitoring Traffic <../monitoring-traffic>`_. + +Viewing Log Files +================= + +You can view the system, event, and error log files Traffic Server +creates. You can also delete a log file or copy it to your local system +if you have the correct user permissions. Traffic Server displays only +one MB of information in the log file. If the log file you select to +view is bigger than 1MB, then Traffic Server truncates the file and +displays a warning message indicating that the file is too big. 
+ +Online Event Log XML Builder +============================ + +If you need any assistance building your event log, you can try out our +`online log builder `_. This is a work in progress, so any +comments, critique or suggestions are most welcome. + +Example Event Log File Entries +============================== + +This section shows an example log file entry in each of the standard log +formats supported by Traffic Server: Squid, Netscape Common, Netscape Extended, +and Netscape Extended-2. + +.. _log-formats-squid-format: + +Squid Format +------------ + +The following figure shows a sample log entry in a ``squid.log`` file. + +.. figure:: ../static/images/admin/squid_format.jpg + :align: center + :alt: Sample log entry in squid.log + + Sample log entry in squid.log + +====== ========= ============================================================================= +Field Symbol Description +====== ========= ============================================================================= +1 cqtq The client request timestamp in Squid format; the time of the client + request in seconds since January 1, 1970 UTC (with millisecond resolution). +2 ttms The time Traffic Server spent processing the client request; the + number of milliseconds between the time the client established the + connection with Traffic Server and the time Traffic Server sent the + last byte of the response back to the client. +3 chi The IP address of the client’s host machine. +4 crc/pssc The cache result code; how the cache responded to the request: + ``HIT``, ``MISS``, and so on. Cache result codes are described + :ref:`here `. + The proxy response status code (the HTTP response status code from + Traffic Server to client). +5 psql The length of the Traffic Server response to the client in bytes, + including headers and content. +6 cqhm The client request method: ``GET``, ``POST``, and so on. 
+7 cauc The client request canonical URL; blanks and other characters that + might not be parsed by log analysis tools are replaced by escape + sequences. The escape sequence is a percentage sign followed by the + ASCII code number of the replaced character in hex. +8 caun The username of the authenticated client. A hyphen (``-``) means + that no authentication was required. +9 phr/pqsn The proxy hierarchy route; the route Traffic Server used to retrieve + the object. +10 psct The proxy response content type; the object content type taken from + the Traffic Server response header. +====== ========= ============================================================================= + +Squid log in XML +~~~~~~~~~~~~~~~~ + +This is the equivalent XML configuration for the log above:: + + + + % % %/% % % % + % %/% %"/> + + +Netscape Common +--------------- + +.. figure:: ../static/images/admin/netscape_common_format.jpg + :align: center + :alt: Sample log entry in common.log + + Sample log entry in common.log + +====== ========= ============================================================================= +Field Symbol Description +====== ========= ============================================================================= +1 chi The IP address of the client's host machine. +2 -- This hyphen (``-``) is always present in Netscape log entries. +3 caun The authenticated client username. A hyphen (``-``) means no + authentication was required. +4 cqtd The date and time of the client request, enclosed in brackets. +5 cqtx The request line, enclosed in quotes. +6 pssc The proxy response status code (HTTP reply code). +7 pscl The length of the Traffic Server response to the client in bytes. +====== ========= ============================================================================= + +Netscape Common in XML +~~~~~~~~~~~~~~~~~~~~~~ + +This is the equivalent XML configuration for the log above:: + + + + - % [%] \"%\" % %"/> + + +Netscape Extended +----------------- + +.. 
figure:: ../static/images/admin/netscape_extended_format.jpg + :align: center + :alt: Sample log entry in extended.log + + Sample log entry in extended.log + +In addition to field 1-7 from the Netscape Common log format above, the Extended format also adds +the following fields: + +====== ========= ============================================================================= +Field Symbol Description +====== ========= ============================================================================= +8 sssc The origin server response status code. +9 sshl The server response transfer length; the body length in the origin + server response to Traffic Server, in bytes. +10 cqbl The client request transfer length; the body length in the client + request to Traffic Server, in bytes. +11 pqbl The proxy request transfer length; the body length in the Traffic + Server request to the origin server. +12 cqhl The client request header length; the header length in the client + request to Traffic Server. +13 pshl The proxy response header length; the header length in the Traffic + Server response to the client. +14 pqhl The proxy request header length; the header length in Traffic Server + request to the origin server. +15 sshl The server response header length; the header length in the origin + server response to Traffic Server. +16 tts The time Traffic Server spent processing the client request; the + number of seconds between the time that the client established the + connection with Traffic Server and the time that Traffic Server sent + the last byte of the response back to the client. +====== ========= ============================================================================= + +Netscape Extended in XML +~~~~~~~~~~~~~~~~~~~~~~~~ + +This is the equivalent XML configuration for the log above:: + + + + - % [%] \"%\" % % + % % % % % % % % %"/> + + +Netscape Extended2 +------------------ + +.. 
figure:: ../static/images/admin/netscape_extended2_format.jpg + :align: center + :alt: Sample log entry in extended2.log + + Sample log entry in extended2.log + +In addition to field 1-16 from the log formats above, the Extended2 format also adds +the following fields: + +====== ========= ============================================================================= +Field Symbol Description +====== ========= ============================================================================= +17 phr The proxy hierarchy route; the route Traffic Server used to retrieve + the object. +18 cfsc The client finish status code: ``FIN`` if the client request + completed successfully or ``INTR`` if the client request was + interrupted. +19 pfsc The proxy finish status code: ``FIN`` if the Traffic Server request + to the origin server completed successfully or ``INTR`` if the + request was interrupted. +20 crc The cache result code; how the Traffic Server cache responded to the + request: HIT, MISS, and so on. Cache result codes are described + :ref:`here `. +====== ========= ============================================================================= + +Netscape Extended2 in XML +~~~~~~~~~~~~~~~~~~~~~~~~~ + +This is the equivalent XML configuration for the log above:: + + + + - % [%] \"%\" % % + % % % % % % % % % % % % %"/> + + +.. _squid-netscape-result-codes: + +Squid- and Netscape-format: Cache Result Codes +============================================== + +The following table describes the cache result codes in Squid and +Netscape log files. + +``TCP_HIT`` + A valid copy of the requested object was in the cache and Traffic + Server sent the object to the client. + +``TCP_MISS`` + The requested object was not in cache, so Traffic Server retrieved + the object from the origin server (or a parent proxy) and sent it to + the client. + +``TCP_REFRESH_HIT`` + The object was in the cache, but it was stale. 
Traffic Server made an + ``if-modified-since`` request to the origin server and the + origin server sent a ``304`` not-modified response. Traffic + Server sent the cached object to the client. + +``TCP_REF_FAIL_HIT`` + The object was in the cache but was stale. Traffic Server made an + ``if-modified-since`` request to the origin server but the server + did not respond. Traffic Server sent the cached object to the + client. + +``TCP_REFRESH_MISS`` + The object was in the cache but was stale. Traffic Server made an + ``if-modified-since`` request to the origin server and the server + returned a new object. Traffic Server served the new object to the + client. + +``TCP_CLIENT_REFRESH`` + The client issued a request with a ``no-cache`` header. Traffic + Server obtained the requested object from the origin server and sent + a copy to the client. Traffic Server deleted the previous copy of + the object from cache. + +``TCP_IMS_HIT`` + The client issued an ``if-modified-since`` request and the object + was in cache & fresher than the IMS date, **or** an + ``if-modified-since`` request to the origin server revealed the + cached object was fresh. Traffic Server served the cached object to + the client. + +``TCP_IMS_MISS`` + The client issued an + ``if-modified-since request``, and the object was either not in + cache or was stale in cache. Traffic Server sent an + ``if-modified-since request`` to the origin server and received the + new object. Traffic Server sent the updated object to the client. + +``TCP_SWAPFAIL`` + The object was in the cache but could not be accessed. The client + did not receive the object. + +``ERR_CLIENT_ABORT`` + The client disconnected before the complete object was sent. + +``ERR_CONNECT_FAIL`` + Traffic Server could not reach the origin server. + +``ERR_DNS_FAIL`` + The Domain Name Server (DNS) could not resolve the origin server + name, or no DNS could be reached. + +``ERR_INVALID_REQ`` + The client HTTP request was invalid. 
(Traffic Server forwards + requests with unknown methods to the origin server.) + +``ERR_READ_TIMEOUT`` + The origin server did not respond to Traffic Server's request within + the timeout interval. + +``ERR_PROXY_DENIED`` + Client service was denied. + +``ERR_UNKNOWN`` + The client connected, but subsequently disconnected without sending + a request. diff --git a/doc/arch/cache/cache-api.en.rst b/doc/arch/cache/cache-api.en.rst new file mode 100644 index 00000000..7a9963e9 --- /dev/null +++ b/doc/arch/cache/cache-api.en.rst @@ -0,0 +1,42 @@ +.. Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, + software distributed under the License is distributed on an + "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied. See the License for the + specific language governing permissions and limitations + under the License. + +.. include:: common.defs + +=========================== +Cache Related API functions +=========================== + +.. c:function:: void TSHttpTxnReqCacheableSet(TSHttpTxn txnp, int flag) + + Set a *flag* that marks a request as cacheable. This is a positive override only, setting *flag* to 0 restores the default behavior, it does not force the request to be uncacheable. + +.. c:function:: TSReturnCode TSCacheUrlSet(TSHttpTxn txnp, char const* url, int length) + + Set the cache key for the transaction *txnp* as the string pointed at by *url* of *length* characters. It need not be ``null`` terminated. 
This should be called from ``TS_HTTP_READ_REQUEST_HDR_HOOK`` which is before cache lookup but late enough that the HTTP request header is available. + +=============== +Cache Internals +=============== + +.. cpp:function:: int DIR_SIZE_WITH_BLOCK(int big) + + A preprocessor macro which computes the maximum size of a fragment based on the value of *big*. This is computed as if the argument where the value of the *big* field in a struct :cpp:class:`Dir`. + +.. cpp:function:: int DIR_BLOCK_SIZE(int big) + + A preprocessor macro which computes the block size multiplier for a struct :cpp:class:`Dir` where *big* is the *big* field value. diff --git a/doc/arch/cache/cache-appendix.en.rst b/doc/arch/cache/cache-appendix.en.rst new file mode 100644 index 00000000..2b69adca --- /dev/null +++ b/doc/arch/cache/cache-appendix.en.rst @@ -0,0 +1,99 @@ +.. Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, + software distributed under the License is distributed on an + "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied. See the License for the + specific language governing permissions and limitations + under the License. + +.. include:: common.defs + +Cache Tools +~~~~~~~~~~~ + +Tools and techniques for cache monitoring and inspection. + +* :ref:`The cache inspector `. 
+ +Topics to be done +~~~~~~~~~~~~~~~~~ + +* Resident alternates +* Object refresh + +Cache Consistency +~~~~~~~~~~~~~~~~~ + +The cache is completely consistent, up to and including kicking the power cord out, if the write buffer on consumer disk drives is disabled. You need to use:: + + hdparm -W0 + +The cache validates that all the data for the document is available and will silently mark a partial document as a "miss" on read. There is no "gentle" shutdown for traffic server, you just kill the process, so the "recovery" code (fsck) is run every time traffic server starts up. + +On startup the two versions of the index are checked, and the last valid one is read into memory. Then traffic server moves forward from the last snapped write cursor and reads all the fragments written to disk, and updates the directory (as in a log-based file system). It stops reading at the write before the last valid write header it sees (as a write is not necessarily atomic because of sector reordering). Then the new updated index is written to the invalid version (in case of a crash during startup) and the system starts. + +.. _volume tagging: + +Volume Tagging +~~~~~~~~~~~~~~ + +Currently cache volumes are allocated somewhat arbitrarily from storage elements. `This enhancement `__ allows the :file:`storage.config` file to assign storage units to specific volumes although the volumes must still be listed in :file:`volume.config` in general and in particular to map domains to specific volumes. A primary use case for this is to be able to map specific types of content to different storage elements. This could to have different storage devices for the content (SSD vs. rotational). + +--------------- +Version Upgrade +--------------- + +It is currently the case that any change to the cache format will clear the cache. This is an issue when upgrading the |TS| version and should be kept in mind. + +.. 
cache-key: + +------------------------- +Controlling the cache key +------------------------- + +The cache key is by default the URL of the request. There are two possible choices, the original ("pristine") URL and the remapped URL. Which of these is used is determined by the configuration value :ts:cv:`proxy.config.url_remap.pristine_host_hdr`. + +This is an ``INT`` value. If set to ``0`` (disabled) then the remapped URL is used, and if it is not ``0`` (enabled) then the original URL is used. This setting also controls the value of the ``HOST`` header that is placed in the request sent to the origin server, using hostname from the original URL if non-``0`` and the host name from the remapped URL if ``0``. It has no other effects. + +For caching, this setting is irrelevant if no remapping is done or there is a one to one mapping between the original and remapped URLs. + +It becomes significant if multiple original URLs are mapped to the same remapped URL. If pristine headers are enabled requests to different original URLs will be stored as distinct objects in the cache. If disabled the remapped URL will be used and there may be collisions. This is bad if the contents different but quite useful if they are the same (e.g., the original URLs are just aliases for the same underlying server). + +This is also an issue if a remapping is changed because it is effectively a time axis version of the previous case. If an original URL is remapped to a different server address then the setting determines if existing cached objects will be served for new requests (enabled) or not (disabled). Similarly if the original URL mapped to a particular URL is changed then cached objects from the initial original URL will be served from the updated original URL if pristine headers is disabled. + +These collisions are not of themselves good or bad. An administrator needs to decide which is appropriate for their situation and set the value correspondingly. 
+ +If a greater degree of control is desired a plugin must be used to invoke the API call :c:func:`TSCacheUrlSet()` to provide a specific cache key. The :c:func:`TSCacheUrlSet()` API can be called as early as ``TS_HTTP_READ_REQUEST_HDR_HOOK``, but no later than ``TS_HTTP_POST_REMAP_HOOK``. It can be called only once per transaction; calling it multiple times has no additional effect. + +A plugin that changes the cache key *must* do so consistently for both cache hit and cache miss requests because two different requests that map to the same cache key will be considered equivalent by the cache. Use of the URL directly provides this and so must any substitute. This is entirely the responsibility of the plugin, there is no way for the |TS| core to detect such an occurrence. + +If :c:func:`TSHttpTxnCacheLookupUrlGet()` is called after new cache url set by :c:func:`TSCacheUrlSet()`, it should use a URL location created by :c:func:`TSUrlCreate()` as its 3rd input parameter instead of getting url_loc from client request. + +It is a requirement that the string be syntactically a URL but otherwise it is completely arbitrary and need not have any path. For instance if the company Network Geographics wanted to store certain content under its own cache key, using a document GUID as part of the key, it could use a cache key like :: + + ngeo://W39WaGTPnvg + +The scheme ``ngeo`` was picked because it is *not* a valid URL scheme and so will not collide with any valid URL. + +This can be useful if the URL encodes both important and unimportant data. Instead of storing potentially identical content under different URLs (because they differ on the unimportant parts) a url containing only the important parts could be created and used. + +For example, suppose the URL for Network Geographics content encoded both the document GUID and a referral key. 
:: + + http://network-geographics-farm-1.com/doc/W39WaGTPnvg.2511635.UQB_zCc8B8H + +We don't want to the same content for every possible referrer. Instead we could use a plugin to convert this to the previous example and requests that differed only in the referrer key would all reference the same cache entry. Note that we would also map :: + + http://network-geographics-farm-56.com/doc/W39WaGTPnvg.2511635.UQB_zCc8B8H + +to the same cache key. This can be handy for "sharing" content between servers when that content is identical. Note also the plugin can change the cache key or not depending on any data in the request header, for instance not changing the cache key if the request is not in the ``doc`` directory. If distinguishing servers is important that can easily be pulled from the request URL and used in the synthetic cache key. The implementor is free to extract all relevant elements for use in the cache key. + +While there is explicit no requirement that the synthetic cache key be based on the HTTP request header, in practice it is generally necessary due to the consistency requirement. Because cache lookup happens before attempting to connect to the origin server no data from the HTTP response header is available, leaving only the request header. The most common case is the one described above where the goal is to elide elements of the URL that do not affect the content to minimize cache footprint and improve cache hit rates. diff --git a/doc/arch/cache/cache-arch.en.rst b/doc/arch/cache/cache-arch.en.rst new file mode 100644 index 00000000..85384efb --- /dev/null +++ b/doc/arch/cache/cache-arch.en.rst @@ -0,0 +1,854 @@ +.. Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. 
The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, + software distributed under the License is distributed on an + "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied. See the License for the + specific language governing permissions and limitations + under the License. + +Cache Architecture +****************** + +.. include:: common.defs + +Introduction +~~~~~~~~~~~~ + +In addition to an HTTP proxy, |ATS| is also an HTTP cache. |TS| can cache any octet stream although it currently +supports only those octet streams delivered by the HTTP protocol. When such a stream is cached (along with the HTTP +protocol headers) it is termed an :term:`object ` in the cache. Each object is identified by a globally +unique value called a :term:`cache key`. + +The purpose of this document is to describe the basic structure and implementation details of the |TS| cache. +Configuration of the cache will be discussed only to the extent needed to understand the internal mechanisms. This +document will be useful primarily to |TS| developers working on the |TS| codebase or plugins for |TS|. It is assumed the +reader is already familiar with the :ref:`admin-guide` and specifically with :ref:`http-proxy-caching` and +:ref:`configuring-the-cache` along with the associated configuration files and values. + +Unfortunately the internal terminology is not particularly consistent so this document will frequently use terms in +different ways than they are used in the code in an attempt to create some consistency. + +Cache Layout +~~~~~~~~~~~~ + +The following sections describe how persistent cache data is structured. 
|TS| treats its persisent storage an +undifferentiated collection of bytes, assuming no other structure to it. In particular it does not use the file system +of the host operating system. If a file is used it is used only to mark out the set of bytes to be used. + +Cache storage +============= + +The raw storage for the |TS| cache is configured in :file:`storage.config`. Each line in the file defines a :term:`cache +span` which is treated as a uniform persistent store. + +.. figure:: images/cache-spans.png + :align: center + + Two cache spans + +This storage organized in to a set of :term:`cache volume`\ s which are defined in :file:`volume.config` for the +purposes of the administrator. These are the units that used for all other administator level configuration. + +Cache volumes can be defined by a percentage of the total storage or an absolute amount of storage. By default each +cache volume is spread across all of the cache spans for robustness. The intersection of a cache volume and a cache span +is a :term:`cache stripe`. Each cache span is divided in to cache stripes and each cache volume is a collection of those +stripes. + +If the cache volumes for the example cache spans were defined as + +.. image:: images/ats-cache-volume-definition.png + :align: center + +then the actual layout would look like + +.. image:: images/cache-span-layout.png + :align: center + +Cache stripes are the fundamental unit of cache for the implementation. A cached object is stored entirely in a single +stripe, and therefore in a single cache span - objects are never split across cache spans or volumes. Objects are +assigned to a stripe (and hence to a cache volume) automatically based on a hash of the URI used to retrieve the object +from the origin server. It is possible to configure this to a limited extent in :file:`hosting.config` which supports +content from specific host or domain to be stored on specific cache volumes. 
In addition, as of version 4.0.1 it is +possible to control which cache spans (and hence, which cache stripes) are contained in a specific cache volume. + +The layout and structure of the cache spans, the cache volumes, and the cache stripes that compose them are derived +entirely from the :file:`storage.config` and :file:`cache.config` and is recomputed from scratch when the +:program:`traffic_server` is started. Therefore any change to those files can (and almost always will) invalidate the +existing cache in its entirety. + +Stripe Structure +================ + +|TS| treats the storage associated with a cache stripe as an undifferentiated span of bytes. Internally each stripe is +treated almost entirely independently. The data structures described in this section are duplicated for each stripe. +Internally the term "volume" is used for these stripes and implemented primarily in :cpp:class:`Vol`. What a user thinks +of as a volume (what this document calls a "cache volume") is represented by :cpp:class:`CacheVol`. + +.. note:: + + Stripe assignment must be done before working with an object because the directory is local to the stripe. Any cached + objects for which the stripe assignment is changed are effectively lost as their directory data will not be found in + the new stripe. + +.. index:: cache directory +.. _cache-directory: + +Cache Directory +--------------- + +.. index:: directory entry +.. index:: fragment +.. index:: cache ID + +.. _fragment: + +Content in a stripe is tracked via a directory. We call each element of the directory a "directory entry" and each is +represented by :cpp:class:`Dir`. Each entry refers to a chunk of contiguous storage in the cache. These are referred to +variously as "fragments", "segments", "docs" / "documents", and a few other things. This document will use the term +"fragment" as that is the most common reference in the code. The term "Doc" (for :cpp:class:`Doc`) will be used to refer +to the header data for a fragment. 
Overall the directory is treated as a hash with the :term:`cache ID` as the key. See +:ref:`directory probing ` for how the cache ID is used to locate a directory entry. The cache ID +is in turn computed from a :term:`cache key` which by default is the URL of the content. + +The directory is used as a memory resident structure which means a directory entry is as small as possible (currently 10 +bytes). This forces some compromises on the data that can be stored there. On the other hand this means that most cache +misses do not require disk I/O which has a large performance benefit. + +An additional point is the directory is always fully sized. Once a stripe is initialized the directory size is +fixed and never changed. This size is related (roughly linearly) to the size of the stripe. It is for this reason the +memory footprint of |TS| depends strongly on the size of the disk cache. Because the directory size does not change, +neither does this memory requirement so |TS| does not consume more memory as more content is stored in the cache. If +there is enough memory to run |TS| with an empty cache there is enough to run it with a full cache. + +.. figure:: images/cache-directory-structure.png + :align: center + +Each entry stores an offset in the stripe and a size. The size stored in the directory entry is an :ref:`approximate +size ` which is at least as big as the actual data in the fragment. Exact size data is stored in the fragment +header on disk. + +.. note:: + + Data in HTTP headers cannot be examined without disk I/O. This includes the original URL for the object. The cache + key is not stored explicitly and therefore cannot be reliably retrieved. + +The directory is a hash table that uses `chaining +`_ for collision resolution. Because each +entry is small they are used directly as the list header of the hash bucket. + +.. _dir-segment: +.. _dir-bucket: + +Chaining is implemented by imposing grouping structures on the entries in a directory. 
The first level grouping is a +:term:`directory bucket`. This is a fixed number (currently 4 - defined as ``DIR_DEPTH``) of entries. This +serves to define the basic hash buckets with the first entry in each cache bucket serving as the root of the hash +bucket. + +.. note:: + + The term "bucket" is used in the code to mean both the conceptual bucket for hashing and for a structural grouping + mechanism in the directory and so these will be qualified as needed to distinguish them. The unqualified term + "bucket" is almost always used to mean the structural grouping in the directory. + +Directory buckets are grouped in to :term:`segments `. All segments in a stripe have the same number of +buckets. The number of segments in a stripe is chosen so that each segment has as many buckets as possible without +exceeding 65535 (2\ :sup:`16`\ -1) entries in a segment. + +.. figure:: images/dir-segment-bucket.png + :align: center + +Each directory entry has a previous and next index value which is used to link entries in the same segment. Because no +segment has more than 65535 entries 16 bits suffices for storing the index values. The stripe header contains an array +of entry indices which are used as the roots of entry free lists, one for each segment. Active entries are stored via +the bucket structure. When a stripe is initialized the first entry in each bucket is zeroed (marked unused) and all +other entries are put in the corresponding segment free list in the stripe header. This means the first entry of each +directory bucket is used as the root of a hash bucket and is therefore marked unused rather than being put a free list. +The other entries in the directory bucket are preferentially preferred for adding to the corresponding hash bucket but +this is not required. The segment free lists are initialized such that the extra bucket entries are added in order - all +the seconds, then the thirds, then the fourths. 
Because the free lists are FIFOs this means extra entries will be +selected from the fourth entries across all the buckets first, then the thirds, etc. When allocating a new directory +entry in a bucket the entries are searched from first to last, which maximizes bucket locality (that is, cache IDs that +map to the same hash bucket will also tend to use the same directory bucket). + +.. figure:: images/dir-bucket-assign.png + :align: center + +Entries are removed from the free list when used and returned when no longer in use. When a fragment needs to be put in +to the directory the cache ID is used to locate a hash bucket (which also determines the segment and directory bucket). +If the first entry in the directory bucket is marked unused, it is used. If not then the other entries in the bucket are +searched and if any are on the free list, that entry is used. If none are available then the first entry on the segment +free list is used. This entry is attached to the hash bucket via the same next and previous indices used for the free +list so that it can be found when doing a lookup of a cache ID. + +Storage Layout +-------------- + +The storage layout is the stripe metadata followed by cached content. The metadata consists of three parts - the stripe +header, the directory, and the stripe footer. The metadata is stored twice. The header and the footer are instances of +:cpp:class:`VolHeaderFooter`. This is a stub structure which can have a trailing variable sized array. This array is +used as the segment free list roots in the directory. Each contains the segment index of the first element of the free +list for the segment. The footer is a copy of the header without the segment free lists. This makes the size of the +header dependent on the directory but not that of the footer. + +.. figure:: images/cache-stripe-layout.png + :align: center + +Each stripe has several values that describe its basic layout. + +skip + The start of stripe data. 
This represents either space reserved at the start of a physical device to avoid problems + with the host operating system, or an offset representing use of space in the cache span by other stripes. + +start + The offset for the start of the content, after the stripe metadata. + +length + Total number of bytes in the stripe. :cpp:member:`Vol::len`. + +data length + Total number of blocks in the stripe available for content storage. :cpp:member:`Vol::data_blocks`. + +.. note:: Great care must be taken with sizes and lengths in the cache code because there are at least three different metrics (bytes, cache blocks, store blocks) used in various places. + +The total size of the directory (the number of entries) is computed by taking the size of the stripe and dividing by the +average object size. The directory always consumes this amount of memory which has the effect that if cache size is +increased so is the memory requirement for |TS|. The average object size defaults to 8000 bytes but can be configured +using :ts:cv:`proxy.config.cache.min_average_object_size`. Increasing the average object size will reduce the memory +footprint of the directory at the expense of reducing the number of distinct objects that can be stored in the cache +[#]_. + +.. index: write cursor +.. _write-cursor: + +The content area stores the actual objects and is used as a circular buffer where new objects overwrite the least +recently cached objects. The location in a stripe where new cache data is written is called the *write cursor*. This +means that objects can be de facto evicted from cache even if they have not expired if the data is overwritten by the +write cursor. If an object is overwritten this is not detected at that time and the directory is not updated. Instead it +will be noted if the object is accessed in the future and the disk read of the fragment fails. + +.. figure:: images/ats-cache-write-cursor.png + :align: center + + The write cursor and documents in the cache. + +.. 
note:: Cache data on disk is never updated. + +This is a key thing to keep in mind. What appear to be updates (such as doing a refresh on stale content and getting +back a 304) are actually new copies of data being written at the write cursor. The originals are left as "dead" space +which will be consumed when the write cursor arrives at that disk location. Once the stripe directory is updated (in +memory!) the original fragment in the cache is effectively destroyed. This is the general space management techinque +used in other cases as well. If an object needs to removed from cache, only the directory needs to be changed. No other +work (and *particularly* no disk I/O) needs to be done. + +Object Structure +================ + +Objects are stored as two types of data, metadata and content data. Metadata is all the data about the object and the +content and includes the HTTP headers. The content data is the content of the object, the octet stream delivered to the +client as the object. + +Objects are rooted in a :cpp:class:`Doc` structure stored in the cache. :cpp:class:`Doc` serves as the header data for a +fragment and is contained at the start of every fragment. The first fragment for an object is termed the "first ``Doc``" +and always contains the object metadata. Any operation on the object will read this fragment first. The fragment is +located by converting the cache key for the object to a cache ID and then doing a lookup for a directory entry with that +key. The directory entry has the offset and approximate size of the first fragment which is then read from the disk. +This fragment will contain the request header and response along with overall object properties (such as content +length). + +.. index:: alternate + +|TS| supports `varying content `_ for objects. These +are called *alternates*. All metadata for all alternates is stored in the first fragment including the set of alternates +and the HTTP headers for them. 
This enables `alternate selection +`_ to be +done after the first ``Doc`` is read from disk. An object that has more than one alternate will have the alternate +content stored separately from the first fragment. For objects with only one alternate the content may or may not be in +the same (first) fragment as the metadata. Each separate alternate content is allocated a directory entry and the key +for that entry is stored in the first fragment metadata. + +Prior to version 4.0.1 the header data was stored in the :cpp:class:`CacheHTTPInfoVector` class which was marshaled to a +variable length area of the on disk image, followed by information about additional fragments if needed to store the +object. + +.. figure:: images/cache-doc-layout-3-2-0.png + :align: center + + ``Doc`` layout 3.2.0 + +This had the problem that with only one fragment table it could not be reliable for objects with more than one alternate +[#]_. Therefore the fragment data was moved from being a separate variable length section of the metadata to being +directly incorporated in to the :cpp:class:`CacheHTTPInfoVector`, yielding a layout of the following form. + +.. figure:: images/cache-doc-layout-4-0-1.png + :align: center + + ``Doc`` layout 4.0.1 + +Each element in the vector contains for each alternate, in addition to the HTTP headers and the fragment table (if any), +a cache key. This cache key identifies a directory entry that is referred to as the "earliest ``Doc``". This is the +location where the content for the alternate begins. + +When the object is first cached, it will have a single alternate and that will be stored (if not too large) in first +``Doc``. This is termed a *resident alternate* in the code. This can only happen on the initial store of the object. 
If +the metadata is updated (such as a ``304`` response to an ``If-Modified-Since`` request) then unless the object is +small, the object data will be left in the original fragment and a new fragment written as the first fragment, making +the alternate non-resident. "Small" is defined as a length smaller than :ts:cv:`proxy.config.cache.alt_rewrite_max_size`. + +.. note:: + + The :cpp:class:`CacheHTTPInfoVector` is stored only in the first ``Doc``. Subsequent ``Doc`` instances for the + object, including the earliest ``Doc``, should have an ``hlen`` of zero and if not, it is ignored. + +Large objects are split in to multiple fragments when written to the cache. This is indicated by a total document length +that is longer than the content in first ``Doc`` or an earliest ``Doc``. In such a case a fragment offset table is +stored. This contains the byte offset in the object content of the first byte of content data for each fragment past the +first (as the offset for the first is always zero). This allows range requests to be serviced much more efficiently for +large objects, as intermediate fragments that do not contain data in the range can be skipped. The last fragment in the +sequence is detected by the fragment size and offset reaching the end of the total size of the object, there is no +explicit end mark. Each fragment is computationally chained from the previous in that the cache key for fragment N is +computed by:: + + key_for_N_plus_one = next_key(key_for_N); + +where ``next_key`` is a global function that deterministically computes a new cache key from an existing cache key. + +Objects with multiple fragments are laid out such that the data fragments (including the earliest ``Doc``) are written +first and the first ``Doc`` is written last. 
When read from disk, both the first and earliest ``Doc`` are validated +(tested to ensure that they haven't been overwritten by the write cursor) to verify that the entire document is present +on disk (as they bookend the other fragments - the write cursor cannot overwrite them without overwriting at leastone of +the verified ``Doc`` instances). Note that while the fragments of a single object are ordered they are not necessarily +contiguous as data from different objects are interleaved as the data arrives in |TS|. + +.. figure:: images/cache-multi-fragment.png + :align: center + + Multi-alternate and multi-fragment object storage + +.. index:: pinned + +Documents which are "pinned" into the cache must not be overwritten so they are "evacuated" from in front of the write +cursor. Each fragment is read and rewritten. There is a special lookup mechanism for objects that are being evacuated so +that they can be found in memory rather than the potentially unreliable disk regions. The cache scans ahead of the write +cursor to discover pinned objects as there is a dead zone immediately before the write cursor from which data cannot be +evacuated. Evacuated data is read from disk and placed in the write queue and written as its turn comes up. + +It appears that objects can only be pinned via the :file:`cache.config` file and if the +:ts:cv:`proxy.config.cache.permit.pinning` is set to non-zero (it is zero by default). Objects which are in use when the +write cursor is near use the same underlying evacuation mechanism but are handled automatically and not via the explicit +``pinned`` bit in :cpp:class:`Dir`. + +.. [#] It could, under certain circumstances, be accurate for none of the alternates. + +Additional Notes +==================== + +Some general observations on the data structures. + +Cyclone buffer +-------------- + +Because the cache is a cyclone cache objects are not preserved for an indefinite time. 
Even if the object is not stale +it can be overwritten as the cache cycles through its volume. Marking an object as ``pinned`` preserves the object +through the passage of the write cursor but this is done by copying the object across the gap, in effect re-storing it +in the cache. Pinning large objects or a large number objects can lead to a excessive disk activity. The original +purpose of pinning seems to have been for small, frequently used objects explicitly marked by the administrator. + +This means the purpose of expiration data on objects is simply to prevent them from being served to clients. They are +not in the standard sense deleted or cleaned up. The space can't be immediately reclaimed in any event because writing +only happens at the write cursor. Deleting an object consists only of removing the directory entries in the volume +directory which suffices to (eventually) free the space and render the document inaccessible. + +Historically the cache is designed this way because web content was relatively small and not particularly consistent. +The design also provides high performance and low consistency requirements. There are no fragmentation issues for the +storage, and both cache misses and object deletions require no disk I/O. It does not deal particularly well with long +term storage of large objects. See the :ref:`volume tagging` appendix for details on some work in this area. + +Disk Failure +------------ + +The cache is designed to be relatively resistant to disk failures. Because each storage unit in each volume is mostly independent the loss of a disk simply means that the corresponding :cpp:class:`Vol` instances (one per cache volume that uses the storage unit) becomes unusable. The primary issue is updating the volume assignment table to both preserve assignments for objects on still operational volumes while distributing the assignments from the failed disk to those operational volumes. 
This is mostly done in
Note that since there is a directory for every storage unit in a cache volume, this is the offset in to the slice of a storage unit attached to that volume. + +.. _dir-size: + + The *size* and *big* values are used to calculate the approximate size of the fragment which contains the object. This value is used as the number of bytes to read from storage at the offset value. The exact size is contained in the object metadata in :cpp:class:`Doc` which is consulted once the read has completed. For this reason the approximate size needs to be at least as large as the actual size but can be larger, at the cost of reading the extraneous bytes. + + The computation of the approximate size of the fragment is defined as:: + + ( *size* + 1 ) * 2 ^ ( ``CACHE_BLOCK_SHIFT`` + 3 * *big* ) + + where ``CACHE_BLOCK_SHIFT`` is the bit width of the size of a basic cache block (9, corresponding to a sector size of 512). Therefore the value with current defines is:: + + ( *size* + 1 ) * 2 ^ (9 + 3 * *big*) + + Because *big* is 2 bits the values for the multiplier of *size* are + + .. _big-mult: + + ===== =============== ======================== + *big* Multiplier Maximum Size + ===== =============== ======================== + 0 512 (2^9) 32768 (2^15) + 1 4096 (2^12) 262144 (2^18) + 2 32768 (2^15) 2097152 (2^21) + 3 262144 (2^18) 16777216 (2^24) + ===== =============== ======================== + + Note also that *size* is effectively offset by one, so a value of 0 indicates a single unit of the multiplier. + +.. _target-fragment-size: + +The target fragment size can set with the :file:`records.config` value + + ``proxy.config.cache.target_fragment_size`` + +This value should be chosen so that it is a multiple of a :ref:`cache entry multiplier `. It is not necessary +to make it a power of 2 [#]_. Larger fragments increase I/O efficiency but lead to more wasted space. 
The default size +(1M, 2^20) is a reasonable choice in most circumstances altough in very specific cases there can be benefit from tuning +this parameter. |TS| imposes an internal maximum of a 4194232 bytes which is 4M (2^22) less the size of a struct +:cpp:class:`Doc`. In practice then the largest reasonable target fragment size is 4M - 262144 = 3932160. + +When a fragment is stored to disk the size data in the cache index entry is set to the finest granularity permitted by +the size of the fragment. To determine this consult the :ref:`cache entry multipler ` table, find the smallest +maximum size that is at least as large as the fragment. That will indicate the value of *big* selected and therefore the +granularity of the approximate size. That represents the largest possible amount of wasted disk I/O when the fragment is +read from disk. + +.. index:: DIR_DEPTH, index segment, index buckets + +The set of index entries for a volume are grouped in to *segments*. The number of segments for an index is selected so +that there are as few segments as possible such that no segment has more than 2^16 entries. Intra-segment references can +therefore use a 16 bit value to refer to any other entry in the segment. + +Index entries in a segment are grouped *buckets* each of ``DIR_DEPTH`` (currently 4) entries. These are handled in the +standard hash table way, giving somewhat less than 2^14 buckets per segment. + +.. [#] The comment in :file:`records.config` is simply wrong. + +.. _cache-directory-probe: + +Directory Probing +----------------- + +Directory probing is locating a specific directory entry in the stripe directory based on a cache ID. This is handled +primarily by the function :cpp:func:`dir_probe()`. This is passed the cache ID (:arg:`key`), a stripe (:arg:`d`), and a +last collision (:arg:`last_collision`). The last of these is an in and out parameter, updated as useful during the +probe. 
+ +Given an ID, the top half (64 bits) is used as a :ref:`segment ` index, taken modulo the number of segments in +the directory. The bottom half is used as a :ref:`bucket ` index, taken modulo the number of buckets per +segment. The :arg:`last_collision` value is used to mark the last matching entry returned by :cpp:func:`dir_probe`. + +After computing the appropriate bucket, the entries in that bucket are searched to find a match. In this case a match is +detected by comparison of the bottom 12 bits of the cache ID (the *cache tag*). The search starts at the base entry for +the bucket and then proceeds via the linked list of entries from that first entry. If a tag match is found and there is +no :arg:`collision` then that entry is returned and :arg:`last_collision` is updated to that entry. If :arg:`collision` +is set, then if it isn't the current match the search continues down the linked list, otherwise :arg:`collision` is +cleared and the search continues. The effect of this is that matches are skipped until the last returned match +(:arg:`last_collision`) is found, after which the next match (if any) is returned. If the search falls off the end of +the linked list then a miss result is returned (if no last collision), otherwise the probe is restarted after clearing +the collision on the presumption that the entry for the collision has been removed from the bucket. This can lead to +repeats among the returned values but guarantees that no valid entry will be skipped. + +Last collision can therefore be used to restart a probe at a later time. This is important because the match returned +may not be the actual object - although the hashing of the cache ID to a bucket and the tag matching is unlikely to +create false positives, that is possible. 
When a fragment is read the full cache ID is available and checked and if +wrong, that read can be discarded and the next possible match from the directory found because the cache virtual +connection tracks the last collision value. + +---------------- +Cache Operations +---------------- + +Cache activity starts after the HTTP request header has been parsed and remapped. Tunneled transactions do not interact with the cache because the headers are never parsed. + +To understand the logic we must introduce the term "cache valid" which means something that is directly related to an object that is valid to be put in the cache (e.g. a ``DELETE`` which refers to a URL that is cache valid but cannot be cached itself). This is important because |TS| computes cache validity several times during a transaction and only performs cache operations for cache valid results. The criteria used changes during the course of the transaction as well. This is done to avoid the cost of cache activity for objects that cannot be in the cache. + +The three basic cache operations are lookup, read, and write. We will take deleting entries as a special case of writing where only the volume directory is updated. + +After the client request header is parsed and is determined to be potentially cacheable, a `cache lookup`_ is done. If successful a `cache read`_ is attempted. If either the lookup or the read fails and the content is considered cacheable then a `cache write`_ is attempted. + +Cacheability +============ + +The first thing done with a request with respect to cache is to determine whether it is potentially a valid object for the cache. After initial parsing and remapping this check is done primarily to detect a negative result because if so all further cache processing is skipped -- it will not be put in to the cache nor will a cache lookup be done. There are a number of prerequisites along with configuration options to change them. 
Additional cacheability checks are done later in the process when more is known about the transaction (such as plugin operations and the origin server response). Those checks are described as appropriate in the sections on the relevant operations. + +The set of things which can affect cacheability are + +* Built in constraints +* Settings in :file:`records.config` +* Settings in :file:`cache.config` +* Plugin operations + +The initial internal checks, along with their :file:`records.config` overrides[#]_, are done in:: + + HttpTransact::is_request_cache_lookupable + +The checks that are done are + + Cacheable Method + The request must be one of ``GET``, ``HEAD``, ``POST``, ``DELETE``, ``PUT``. + + See ``HttpTransact::is_method_cache_lookupable()``. + + Dynamic URL + |TS| tries to avoid caching dynamic content because it's dynamic. A URL is considered dynamic if it + + * is not ``HTTP`` or ``HTTPS`` + * has query parameters + * ends in ``asp`` + * has ``cgi`` in the path + + This check can be disabled by setting a non-zero value for:: + + proxy.config.http.cache.cache_urls_that_look_dynamic + + In addition if a TTL is set for rule that matches in :file:`cache.config` then this check is not done. + + Range Request + Cache valid only if :ts:cv:`proxy.config.http.cache.range.lookup` in + :file:`records.config` is non-zero. This does not mean the range request + can be cached, only that it might be satisfiable from the + cache. In addition, :ts:cv:`proxy.config.http.cache.range.write` + can be set to try to force a write on a range request. This + probably has little value at the moment, but if for example the + origin server ignores the ``Range:`` header, this option can allow + for the response to be cached. It is disabled by default, for + best performance. + +A plugin can call :c:func:`TSHttpTxnReqCacheableSet()` to force the request to be viewed as cache valid. + +.. 
[#] The code appears to check :file:`cache.config` in this logic by setting the ``does_config_permit_lookup`` in the ``cache_info.directives`` of the state machine instance but I can find no place where the value is used. The directive ``does_config_permit_storing`` is set and later checked so the directive (from the administrator point of view) is effective in preventing caching of the object. + +Cache Lookup +============ + +If the initial request is not determined to be cache invalid then a lookup is done. Cache lookup determines if an object is in the cache and if so, where it is located. In some cases the lookup proceeds to read the first ``Doc`` from disk to verify the object is still present in the cache. + +There are three basic steps to a cache lookup. + +#. The cache key is computed. + + This is normally computed using the request URL but it can be overridden :ref:`by a plugin ` . As far as I can tell the cache index string is not stored anywhere, it presumed computable from the client request header. + +#. The cache stripe is determined (based on the cache key). + + The cache key is used as a hash key in to an array of :cpp:class:`Vol` instances. The construction and arrangement of this array is the essence of how volumes are assigned. + +#. The cache stripe directory :ref:`is probed ` using the index key computed from the cache key. + + Various other lookaside directories are checked as well, such as the :ref:`aggregation buffer `. + +#. If the directory entry is found the first ``Doc`` is read from disk and checked for validity. + + This is done in :cpp:func:`CacheVC::openReadStartHead()` or :cpp:func:`CacheVC::openReadStartEarliest()` which are tightly coupled methods. + +If the lookup succeeds then a more detailed directory entry (struct :cpp:class:`OpenDir`) is created. Note that the directory probe includes a check for an already extant ``OpenDir`` which if found is returned without additional work. 
+ +Cache Read +========== + +Cache read starts after a successful `cache lookup`_. At this point the first ``Doc`` has been loaded in to memory and can be consulted for additional information. This will always contain the HTTP headers for all alternates of the object. + +.. sidebar:: Read while write + + There is provision in the code to support "read while write", that is serving an object from cache in one transaction while it is being written in another. Several settings are needed for it to be used. See :ref:`reducing-origin-server-requests-avoiding-the-thundering-herd`. It must specifically enabled in :file:`records.config` and if not, a cache read will fail if the object is currently be written or updated. + +At this point an alternate for the object is selected. This is done by comparing the client request to the stored response headers, but it can be controlled by a plugin using ``TS_HTTP_ALT_SELECT_HOOK``. + +The content can now be checked to see if it is stale by calculating the "freshness" of the object. This is essential checking how old the object is by looking at the headers and possibly other metadata (note the headers can't be checked until we've selected an alternate). + +Most of this work is done in:: + + HttpTransact::what_is_document_freshness + +First the TTL (time to live) value which can be set in :file:`cache.config` is checked if the request matches the configuration file line. This is done based on when the object was placed in cache, not on any data in the headers. + +Next an internal flag ("needs-revalidate-once") is checked if the :file:`cache.config` value "revalidate-after" is not set, and if set the object is marked "stale". + +After these checks the object age is calculated by:: + + HttpTransactHeaders::calculate_document_age + +and then any configured fuzzing is applied. 
The limits to this age based on available data is calculated by:: + + HttpTransact::calculate_document_freshness_limit + +How this age is used is determined by the :file:`records.config` value:: + + proxy.config.http.cache.when_to_revalidate + +If this is zero then the built caclulations are used which compare the freshness limits with document age, modified by any of the client supplied cache control values ``max-age``, ``min-fresh``, ``max-stale`` unless explicitly overridden in :file:`cache.config`. + +If the object is not stale then it is served to the client. If stale the client request may be changed to an ``If Modified Since`` request to revalidate. + +The request is served using a standard virtual connection tunnel (``HttpTunnel``) with the :cpp:class:`CacheVC` acting +as the producer and the client ``NetVC`` acting as the sink. If the request is a range request this can be modified with +a transform to select the appropriate parts of the object or, if the request contains a single range, it can use the +range acceleration. + +Range acceleration is done by consulting a fragment offset table attached to the earliest ``Doc`` which contains offsets +for all fragments past the first. This allows loading the fragment containing the first requested byte immediately +rather than performing reads on the intermediate fragments. + +Cache Write +=========== + +Writing to cache is handled by an instance of the class :cpp:class:`CacheVC`. This is a virtual connection which +receives data and writes it to cache, acting as a sink. For a standard transaction data transfers between virtual +connections (*VConns*) are handled by :cpp:class:`HttpTunnel`. Writing to cache is done by attaching a ``CacheVC`` +instance as a tunnel consumer. It therefore operates in parallel with the virtual connection that transfers data to the +client. The data does not flow to the cache and then to the client, it is split and goes both directions in parallel. 
+This avoids any data synchronization issues between the two. + +.. sidebar:: Writing to disk + + The actual write to disk is handled in a separate thread dedicated to I/O operations, the AIO threads. The cache + logic marshals the data and then hands the operation off to the AIO thread which signals back once the operation + completes. + +While each ``CacheVC`` handles its transactions independently, they do interact at the volume level as each ``CacheVC`` +makes calls to the volume object to write its data to the volume content. The ``CacheVC`` accumulates data internally +until either the transaction is complete or the amount of data to write exceeds the target fragment size. In the former +case the entire object is submitted to the volume to be written. In the latter case a target fragment size amount of +data is submitted and the ``CacheVC`` continues to operate on subsequent data. The volume in turn places these write +requests in an holding area called the `aggregation buffer`_. + +For objects under the target fragment size there is no consideration of order, the object is simply written to the +volume content. For larger objects the earliest ``Doc`` is written first and the first ``Doc`` written last. This +provides some detection ability should the object be overwritten. Because of the nature of the write cursor no fragment +after the first fragment (in the earliest ``Doc``) can be overwritten without also overwriting that first fragment +(since we know at the time the object was finalized in the cache the write cursor was at the position of the first +``Doc``). + +.. note:: It is the responsibility of the ``CacheVC`` to not submit writes that exceed the target fragment size. + +.. how does the write logic know if it's an original object write or an update to an existing object? + +Update +------ + +Cache write also covers the case where an existing object in the cache is modified. 
This occurs when + +* A conditional request is made to the origin server and a ``304 - Not Modified`` response is received. +* An alternate of the object is retrieved from an origin server and added to the object. +* An alternate of the object is removed (e.g., due to a ``DELETE`` request). + +In every case the metadata for the object must be modified. Because |TS| never updates data already in the cache this +means the first ``Doc`` will be written to the cache again and the volume directory entry updated. Because a client +request has already been processed the first ``Doc`` has been read from cache and is in memory. The alternate vector is +updated as appropriate (an entry added or removed, or changed to contain the new HTTP headers), and then written to +disk. It is possible for multiple alternates to be updated by different ``CacheVC`` instances at the same time. The only +contention is the first ``Doc``, the rest of the data for each alternate is completely independent. + +.. _aggregation-buffer: + +Aggregation Buffer +------------------ + +Disk writes to cache are handled through an *aggregation buffer*. There is one for each :cpp:class:`Vol` instance. To +minimize the number of system calls data is written to disk in units of roughly :ref:`target fragment size +` bytes. The algorithm used is simple - data is piled up in the aggregation buffer until no more +will fit without going over the target fragment size, at which point the buffer is written to disk and the volume +directory entries for objects with data in the buffer are updated with the actual disk locations for those objects +(which are determined by the write to disk action). After the buffer is written it is cleared and process repeats. There +is a special lookup table for the aggregation buffer so that object lookup can find cache data in that memory. 
+ +Because data in the aggregation buffer is visible to other parts of the cache, particularly `cache lookup`_, there is no +need to push a partial filled aggregation buffer to disk. In effect any such data is effectively memory cached until +enough additional cache content arrives to fill the buffer. + +The target fragment size has little effect on small objects because the fragment sized is used only to parcel out disk +write operations. For larger objects the effect very significant as it causes those objects to be broken up in to +fragments at different locations on in the volume. Each fragment write has its own entry in the volume directory which +are computational chained (each cache key is computed from the previous one). If possible a fragment table is +accumulated in the earliest ``Doc`` which has the offsets of the first byte for each fragment. + +Evacuation Mechanics +-------------------- + +By default the write cursor will overwrite (de facto evict from cache) objects as it proceeds once it has gone around +the cache stripe at least once. In some cases this is not acceptable and the object is *evacuated* by reading it from +the cache and then writing it back to cache which moves the physical storage of the object from in front of the write +cursor to behind the write cursor. Objects that are evacuated are handled in this way based on data in stripe data +structures (attached to the :cpp:class:`Vol` instance). + +Evacuation data structures are defined by dividing up the volume content in to a disjoint and contiguous set of regions +of ``EVACUATION_BUCKET_SIZE`` bytes. The :cpp:member:`Vol::evacuate` member is an array with an element for each +evacuation region. Each element is a doubly linked list of :cpp:class:`EvacuationBlock` instances. Each instance +contains a :cpp:class:`Dir` that specifies the fragment to evacuate. 
It is assumed that an evacuation block is placed in +the evacuation bucket (array element) that corresponds to the evacuation region in which the fragment is located +although no ordering per bucket is enforced in the linked list (this sorting is handled during evacuation). Objects are +evacuated by specifying the first or earliest fragment in the evactuation block. The evactuation operation will then +continue the evacuation for subsequent fragments in the object by adding those fragments in evacuation blocks. Note that +the actual evacuation of those fragments is delayed until the write cursor reaches the fragments, it is not necessarily +done at the time the first / earliest fragment is evacuated. + +There are two types of evacuations, reader based and forced. The ``EvacuationBlock`` has a reader count to track this. +If the reader count is zero, then it is a forced evacuation and the the target, if it exists, will be evacuated when the +write cursor gets close. If the reader value is non-zero then it is a count of entities that are currently expecting to +be able to read the object. Readers increment the count when they require read access to the object, or create the +``EvacuationBlock`` with a count of 1. When a reader is finished with the object it decrements the count and removes the +``EvacuationBlock`` if the count goes to zero. If the ``EvacuationBlock`` already exists with a count of zero, the count +is not modified and the number of readers is not tracked, so the evacuation is valid as long as the object exists. + +Evacuation is driven by cache writes, essentially in :cpp:member:`Vol::aggWrite`. This method processes the pending +cache virtual connections that are trying to write to the stripe. Some of these may be evacuation virtual connections. +If so then the completion callback for that virtual connection is called as the data is put in to the aggregation +buffer. 
+ +When no more cache virtual connections can be processed (due to an empty queue or the aggregation buffer filling) then +:cpp:member:`Vol::evac_range` is called to clear the range to be overwritten plus an additional +:const:`EVACUATION_SIZE` range. The buckets covering that range are checked. If there are any items in the buckets a +new cache virtual connection (a "doc evacuator") is created and used to read the evacuation item closest to the write +cursor (i.e. with the smallest offset in the stripe) instead of the aggregation write proceeding. When the read +completes it is checked for validity and if valid, the cache virtual connection for it is placed at the front of the +write queue for the stripe and the write aggregation resumed. + +Before doing a write, the method :cpp:func:`Vol::evac_range()` is called to start an evacuation. If any fragments are +found in the buckets in the range the earliest such fragment (smallest offset, closest to the write cursor) is selected +and read from disk and the aggregation buffer write is suspended. The read is done via a cache virtual connection which +also effectively serves as the read buffer. Once the read is complete, that cache virtual connection instance (the "doc +evacuator") is place at the front of the stripe write queue and written out in turn. Because the fragment data is now in +memory it is acceptable to overwrite the disk image. + +Note that when normal stripe writing is resumed, this same check is done again, each time evauating (if needed) a +fragment and queuing them for writing in turn. + +Updates to the directory are done when the write for the evacuated fragment completes. Multi-fragment objects are +detected after the read completes for a fragment. If it is not the first fragment then the next fragment is marked for +evacuation (which in turn, when it is read, will pull the subsequent fragment). 
The logic doesn't seem to check the +length and presumes that the end of the alternate is when the next key is not in the directory. + +This interacts with the "one at a time" strategy of the aggregation write logic. If a fragment is close to the fragment being evacuated it may end up in the same evacuation bucket. Because the aggregation write checks every time for the "next" fragment to evacuate it will find that next fragment and evacuate it before it is overwritten. + +.. note + + I do not understand the extra key list that is present in an evacuation block. It is labeled as needed for + "collisions" but I am unclear on what might be colliding. The bucket entries are stored and matched by stripe offset + but if two fragments collide on their offset, only one can be valid. Based on how :ref:`directory probing + ` works and the logic of :cpp:func:`evacuate_fragments()` it appears that rather than determine which + entry in a directory bucket is the correct one, all of them are marked for evacuation (thereby handling + "collisions"). However, each one could have a distinct fragment size and that is set for all of the reads by the + first fragment found in the directory. The intent seems to be to read all fragments that collide at the same starting + offset and then figure out which one was really on the disk after the read by looking through the key list. However, + this seems to presume those fragments will all be the same size, which seems unreasonable. I would think it would + also be necessary to update the size in the :cpp:class:`Dir` instance in the evacuation block to the be largest size + found among the collisions. + +Evacuation Operation +-------------------- + +The primary source of fragments to be evacuated are active fragments. That is fragments which are currently open, to be read or written. This is tracked by the reader value in the evacuation blocks noted above. 
+ +If object pinning is enabled then a scan is done on a regular basis as the write cursor moves to detected pinned objects and mark them for evacuation. + +Fragments can also be evacuated through *hit evacuation*. This is configured by :ts:cv:`proxy.config.cache.hit_evacuate_percent` and :ts:cv:`proxy.config.cache.hit_evacuate_size_limit`. When a fragment is read it is checked to see if it is close and in front of the write cursor, close being less than the specified percent of the size of the stripe. If set at the default value of 10, then if the fragment is withing 10% of the size of the stripe it is marked for evacuation. This is cleared if the write cursor passes through the fragment while it remains open (as all open objects are evacuated). If when the object is closed the fragment is still marked then it is placed in the appropriate evacuation bucket. + +Initialization +============== + +Initialization starts with an instance of :cpp:class:`Store` reading the storage configuration file, by default +:file:`storage.config`. For each valid element in the file an instance of :cpp:class:`Span` is created. These are of +basically four types, + +* File +* Directory +* Disk +* Raw device + +After creating all the :cpp:class:`Span` instances they are grouped by device id to internal linked lists attached to the +:cpp:member:`Store::disk` array [#]_. Spans that refer to the same directory, disk, or raw device are coalesced in to a +single span. Spans that refer to the same file with overlapping offsets are also coalesced [#]_. This is all done in +:c:func:`ink_cache_init()` called during startup. + +.. note:: The span logic is also used by the HostDB and more than one otherwise inexplicable feature is provided by the span logic for that module. + +After configuration initialization the cache processor is started by calling :cpp:func:`CacheProcessor::start()`. This +does a number of things. + +For each valid span, an instance of :cpp:class:`CacheDisk` is created. 
This class is a continuation and so can be used +to perform potentially blocking operations on the span. The primary use of these is to be passed to the AIO threads as +the callback when an I/O operation completes. These are then dispatched to AIO threads to perform storage unit +initialization. After all of those have completed, the resulting storage is distributed across the volumes in +:c:func:`cplist_reconfigure`. The :cpp:class:`CacheVol` instances are created at this time. + +Cache stripe assignment setup is done once all stripes have initialized (that is, the stripe header information has been +successfully read from disk for all stripes). The assignment information is stored as an array of indices. These are +indices in to an array of stripes. Both the assignment and the stripe arrays are stored in an instance of +:cpp:class:`CacheHostRecord`. Assignment initialization consists of populating the assignment array which is much larger +than the stripe array. + +There is an instance of :cpp:class:`CacheHostRecord` for each line in :file:`hosting.config` and one "generic" record. +For the configured instances the set of stripes is determined from the cache volume specified in the line. If no lines are specified all stripes are placed in the generic record, otherwise only those stripes marked as default are placed in the generic record. + +.. note:: If hosting records are specified it is an error to not specify at least one default cache volume. + +The assignment table is initialized in :c:func:`build_vol_hash_table` which is called for each +:cpp:class:`CacheHostRecord` instance. For each strip in the host record a sequence of pseudo-random numbers is +generated, starting with the folded hash of the stripe hash identifier, which is the device path followed by the skip +and size values for that stripe, making it unique. This also makes the sequence deterministic for any particular stripe. 
+Each stripe gets one number in its sequence for every `VOL_HASH_ALLOC_SIZE` (8 MB currently) of storage. These numbers are paired with +the stripe index, combined across all stripes, then sorted by the random values. The resulting array is sampled for +every slot in the stripe assignment table by dividing the maximum random value by the size of the assignment table and +using the value midway between each multiple of the result of the division. The coalesced pseudo-random sequence is +scanned for each sample in turn and the first number not greater than the sample is found. The stripe associated with +that value is used for that assignment table entry. + +While this procedure is deterministic it is sensitive to initial conditions, including the size of each stripe. + +.. rubric:: Footnotes + +.. [#] `Work is under way `_ on extending this to include objects that + are in the ram cache. + +.. [#] This linked list is mostly ignored in later processing, causing all but one file or directory storage units on + the same device to be ignored. See `TS-1869 `_. + +.. [#] It is unclear to me how that can happen, as the offsets are computed later and should all be zero at the time the + spans are coalesced, and as far as I can tell the sort / coalesce is only done during initialization. diff --git a/doc/arch/cache/cache-data-structures.en.rst b/doc/arch/cache/cache-data-structures.en.rst new file mode 100644 index 00000000..508c4dac --- /dev/null +++ b/doc/arch/cache/cache-data-structures.en.rst @@ -0,0 +1,164 @@ +.. Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements.  See the NOTICE file + distributed with this work for additional information + regarding copyright ownership.  The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. 
You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, + software distributed under the License is distributed on an + "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied.  See the License for the + specific language governing permissions and limitations + under the License. + +Cache Data Structures +****************************** + +.. include:: common.defs + +.. cpp:class:: OpenDir + + An open directory entry. It contains all the information of a :cpp:class:`Dir` plus additional information from the first :cpp:class:`Doc`. + +.. cpp:class:: CacheVC + + A virtual connection class which accepts input for writing to cache. + +.. cpp:function:: int CacheVC::openReadStartHead(int event, Event* e) + + Do the initial read for a cached object. + +.. cpp:function:: int CacheVC::openReadStartEarliest(int event, Event* e) + + Do the initial read for an alternate of an object. + +.. cpp:class:: HttpTunnel + + Data transfer driver. This contains a set of *producers*. Each producer is connected to one or more *consumers*. The tunnel handles events and buffers so that data moves from producers to consumers. The data, as much as possible, is kept in reference counted buffers so that copies are done only when the data is modified or for sources (which acquire data from outside |TS|) and sinks (which move data to outside |TS|). + +.. cpp:class:: CacheControlResult + + Holds the data from a line in :file:`cache.config`. + +.. cpp:class:: CacheHTTPInfoVector + + Defined in |P-CacheHttp.h|_. This is an array of :cpp:class:`HTTPInfo` objects and serves as the repository of information about alternates of an object. It is marshaled as part of the metadata for an object in the cache. + +.. cpp:class:: HTTPInfo + + Defined in |HTTP.h|_. + + This class is a wrapper for :cpp:class:`HTTPCacheAlt`. 
It provides the external API for accessing data in the wrapped class. It contains only a pointer (possibly ``NULL``) to an instance of the wrapped class. + +.. cpp:class:: CacheHTTPInfo + + A typedef for :cpp:class:`HTTPInfo`. + +.. cpp:class:: HTTPCacheAlt + + Defined in |HTTP.h|_. + + This is the metadata for a single alternate for a cached object. It contains among other data + + * The key for the earliest ``Doc`` of the alternate. + * The request and response headers. + * The fragment offset table. [#]_ + * Timestamps for request and response from origin server. + +.. cpp:class:: EvacuationBlock + + Record for evacuation. + +.. cpp:class:: Vol + + This represents a storage unit inside a cache volume. + + .. cpp:member:: off_t Vol::segments + + The number of segments in the volume. This will be roughly the total number of entries divided by the number of entries in a segment. It will be rounded up to cover all entries. + + .. cpp:member:: off_t Vol::buckets + + The number of buckets in the volume. This will be roughly the number of entries in a segment divided by ``DIR_DEPTH``. For currently defined values this is around 16,384 (2^16 / 4). Buckets are used as the targets of the index hash. + + .. cpp:member:: DLL\ Vol::evacuate + + Array of :cpp:class:`EvacuationBlock` buckets. This is sized so there is one bucket for every evacuation span. + + .. cpp:member:: off_t len + + Length of stripe in bytes. + +.. cpp:function:: int Vol::evac_range(off_t low, off_t high, int evac_phase) + + Start an evacuation if there is any :cpp:class:`EvacuationBlock` in the range from *low* to *high*. Return 0 if no evacuation was started, non-zero otherwise. + +.. cpp:class:: CacheVol + + A cache volume as described in :file:`volume.config`. + +.. cpp:class:: Doc + + Defined in |P-CacheVol.h|_. + + .. cpp:member:: uint32_t Doc::magic + + Validity check value. Set to ``DOC_MAGIC`` for a valid document. + + .. 
cpp:member:: uint32_t Doc::len + + The length of this segment including the header length, fragment table, and this structure. + + .. cpp:member:: uint64_t Doc::total_len + + Total length of the entire document not including meta data but including headers. + + .. cpp:member:: INK_MD5 Doc::first_key + + First index key in the document (the index key used to locate this object in the volume index). + + .. cpp:member:: INK_MD5 Doc::key + + The index key for this fragment. Fragment keys are computationally chained so that the key for the next and previous fragments can be computed from this key. + + .. cpp:member:: uint32_t Doc::hlen + + Document header (metadata) length. This is not the length of the HTTP headers. + + .. cpp:member:: uint8_t Doc::ftype + + Fragment type. Currently only `CACHE_FRAG_TYPE_HTTP` is used. Other types may be used for cache extensions if those are ever used / implemented. + + .. cpp:member:: uint24_t Doc::flen + + Fragment table length, if any. Only the first ``Doc`` in an object should contain a fragment table. + + The fragment table is a list of offsets relative to the HTTP content (not counting metadata or HTTP headers). Each offset is the byte offset of the first byte in the fragment. The first element in the table is the second fragment (what would be index 1 for an array). The offset for the first fragment is of course always zero and so not stored. The purpose of this is to enable a fast seek for range requests - given the first ``Doc`` the fragment containing the first byte in the range can be computed and loaded directly without further disk access. + + Removed as of version 3.3.0. + + .. cpp:member:: uint32_t Doc::sync_serial + + Unknown. + + .. cpp:member:: uint32_t Doc::write_serial + + Unknown. + + .. cpp:member:: uint32_t pinned + + Flag and timer for pinned objects. + + .. cpp:member:: uint32_t checksum + + Unknown. (A checksum of some sort) + +.. cpp:class:: VolHeaderFooter + +.. rubric:: Footnotes + +.. 
[#] Changed in version 3.2.0. This previously resided in the first ``Doc`` but that caused different alternates to share the same fragment table. diff --git a/doc/arch/cache/cache.en.rst b/doc/arch/cache/cache.en.rst new file mode 100644 index 00000000..f103d9fc --- /dev/null +++ b/doc/arch/cache/cache.en.rst @@ -0,0 +1,33 @@ +.. Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, + software distributed under the License is distributed on an + "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied. See the License for the + specific language governing permissions and limitations + under the License. + +Apache Traffic Server Cache +*************************** + +Contents: + +.. toctree:: + :maxdepth: 2 + + cache-arch.en + cache-data-structures.en + cache-api.en + cache-appendix.en + tier-storage.en + ram-cache.en + +.. appendix diff --git a/doc/arch/cache/common.defs b/doc/arch/cache/common.defs new file mode 100644 index 00000000..5ac36e9a --- /dev/null +++ b/doc/arch/cache/common.defs @@ -0,0 +1,48 @@ +.. Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. 
You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, + software distributed under the License is distributed on an + "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied. See the License for the + specific language governing permissions and limitations + under the License. + +.. Global definitions + +.. Files in github. Should write a custom domain so we can do :ts:git:`iocore/cache/P_CacheDir.h` + +.. |HTTP.h| replace:: ``HTTP.h`` + +.. _HTTP.h: https://github.com/apache/trafficserver/blob/master/proxy/hdrs/HTTP.h + +.. |HttpCacheSM.h| replace:: ``HttpCacheSM.h`` + +.. _HttpCacheSM.h: https://github.com/apache/trafficserver/blob/master/proxy/http/HttpCacheSM.h + +.. |HttpCacheSM.cc| replace:: ``HttpCacheSM.cc`` + +.. _HttpCacheSM.cc: https://github.com/apache/trafficserver/blob/master/proxy/http/HttpCacheSM.cc + +.. |P-CacheDir.h| replace:: ``P_CacheDir.h`` + +.. _P-CacheDir.h: https://github.com/apache/trafficserver/blob/master/iocore/cache/P_CacheDir.h + +.. |P-CacheHttp.h| replace:: ``P_CacheHttp.h`` + +.. _P-CacheHttp.h: https://github.com/apache/trafficserver/blob/master/iocore/cache/P_CacheHttp.h + +.. |P-CacheVol.h| replace:: ``P_CacheVol.h`` + +.. _P-CacheVol.h: https://github.com/apache/trafficserver/blob/master/iocore/cache/P_CacheVol.h + +.. |ATS| replace:: Apache Traffic Server + +.. 
|TS| replace:: Traffic Server diff --git a/doc/arch/cache/images/ats-cache-volume-definition.png b/doc/arch/cache/images/ats-cache-volume-definition.png new file mode 100644 index 0000000000000000000000000000000000000000..238d291af02adab8165f67c4f79f7ca60e5ec5a0 GIT binary patch literal 3086 zcmcIm`9G9vA0EU(M6z>8Ml!}AWMYi6#N-%zWyw}ytixC%WR0vt6oX_kM0SM?F(G>y zrtC7Pv2V%lo$CFZbN+z${o#4;>-+uO%X2;Vb=}`P&KRl3$u7(e001}<`Z}icmPCJo zSsCf?D^Ufr^o9Xrs;31g>l3BXgJaH@4KD)#mC48V9hm7c8(QBI0|1<$k2*nO=Cpiq(J*(Ex?=vpsGo!e3DG7A= z)F=kr+1%_&E@E&?P{3unC$xZf-ccwD=%@z#)f{{qlZah{!Fei;2*2wRE{R&b|8ykH zcXVZ5P+bK~RDWHsCuDObRE#ZnR=ZyOYod`Zb(-^JLYbw*m+y*LDTKm}>DY_1EJ}hR zwh!_ei-N%LXfrjJ)F_ji7Y-`i@-|N&DU?p|leYQa5kl3#;O|6`cAGTqwIA3t~*crkuE6!2S-Y%8*SrEz)nVDM+8>^EQ1`e;)q3$ebOaCLTOkeGX3gosbcG;KBlX+(x{CR-8x73 z8>g5Bv4<=Ipre3*_LqEIGYnOV@BkDrQ5194^TvL$UA$t*NQMJJIG=$f3kiKc&}YRZ z^Ufv&lOQ3c-DjQKJ4#A%ZfR(=UT6kTOSCAD)x1i#!w4g3H8fS-z_{|4$pQo^0Z?Q> zTmAlFh{1)JTu|bS)a@Jl5w#zULm1SDIFK^!XF3bxSvWU7>vVZ>Udo49N4P7uXI9B+ zM}H*1^7ZeS6ZZHgc~y-eAYORYmi- zU9+5BSGoRaY+w*Cu5dOT7oOw9{~}vW?la71Akv+$>ZL>7YGI?Q3gXsP&)&$3OAkej z>9W)e9R4No*G)6N>~uV5*XAV9$ zS1b$T7vJ#ajjS4CH<^x8Hqk>Sa0C+*Q&40Xl6&QOY)R5O!l!=A42~%CnMs=g>HW`v z!^C679Xr$d7}GmpMlW6C!y@-qxN%Ns%^i#S7>mZ@u;?k}$UR#5%u2C^;acILPz0I7 zkf%$&XGn)0ouo&3cz!;w`4tIh?<&I8gr4O8J6Zluk|&~4I!dK;{Ry`iBEwOSH4lY; z)gfzHFRN5gAyw43EEo5ONOHq;`Y`%>%%X$|Y?atk%)?Chk~a8k+VJ3FR62=gZ;V?E znc*1B+Mxm+rAB}?N$LD5fg`f6vkzLdVed(oS+YJ}R}us-Ev6KW)c*;6+&=?9B5)tTw9;kZOcq;;cm(jg7G0aGPZxn`p&r*#jWz&(tf4c^C>MqlU2N^7+oX$i+p>4>u`)l-AFW!TU}ZBZ zl`!yYqu9iJX@q2ui4cFk!L4)fXe)O<9Lrh;J6KcC{3PoK1qW&kkMF}?LWayH2{vw* zH`auVH!^(~uln7em3g_l?0a{u)TeuftjRH}(cDdWLW|SV0M@hisJSoCm;Pl{2ITqo zM^>!;cf>}Arg+K$FiPBhV9}fUSI?;-^MR7{_airp6<|J$NKoP0Z}LPwEy>L23Uy1{ zoS>%6;c};+_BU~ik0hI3&q>Q28rIIHpg|gt z(#k>9Hn6rR&@5ORo%LHi#Gjh|rBYX;t^T`xDZe`~{VmQ3))rI@on4nFXiv}9Sw948 zW!_l)>7NNQ4oaRX0qOhR6*lQ1{$lZ`7j`D6=BsQ-`U11Ag(2AeALB9CnSVNH{YXB% zM05vO-<4{F{Xc+cvZkigQ8y%WAuRLY@aDb#F z(w)Qd$_V|mK`L~~L>1wd2OshtDNsO+Cy~Yc(rV_4QNO`%adG>LKc(|3XY{Qh^*^?w 
z?@`@lAS19Z-#S-h=}wE!LoP4>$;JzEXIqM>_1=n4er|v%^bl7+h;&kIwKO1VwiiO{*5%+A5SIhb=h{nqMga#>muT`lw_D}iEz z;VQ3YCgPxof$?Cd0lczS1&B9Ap68wC?@ATwAC^sO$FyT5i?A%}#;S-<8auS~g0sy5 zmnQ{>C6j^(31!2v(|MF&=vH)gdv`Ksn1T?*d`e(R@Fr_2xb1w@sRFXG80;(DT05DM zf6~@GrtNqVzFRqAzKRxGvxVSVUpZw3+B7>ykjLJ|F_H{0Quw^icUYt!i3>{O<4(~mi zFg41@aYAOFaxY$@{|`ut#z3ddw`sY&*7p$Q-AoS+iR|24e5XZjS*^rMB=+ROjGk}P ziExt$9&G;3;6qjhpwWTMv2W=)wPL3F@PJ-LVP=kt*F7~&mK<&Xd#plv9f~_De75`s zgKp^(Jh5f>*71zg!sf8PtYnP0l~4Y8evc>N8xe-o8Yz)mt}6~HSJrvXgF`9n$&Tlm zPBJ=y#VpW;=c;F=vq8ejFw~aBOy;C;xWodFLi0)*Ga+Bg_m1Lidzn#1_B7?^Z}*#w z&~9l?;-#}DX_F)+%%i^^vz&QDmN^D!Id*$RRVp}q|6i8Bj|+qb?oi^wJN(y}7@$K5 zepup~eJ>{$uzI@MA=bZPv=Um_SU0q$3=KuVk=YH)_3Jw`fCItT>9NYp2sgg{`e z5D4@$0tSAP9c#w~{(xRRsiy@g|G@nN`~&W!X`~5(RNbQAwW9|AMtbU>y9$9Yfv1>S zp|3qk9U%}hCy}6e%HMjvp5BMGKXWl#!O42=9WF(6W61YOpYn!v*Jw?@pLJ)wh)7ke zQ$p7eGGz);>6>uHC{75mGH`|)#nd-IXq6C7cR(q9gku?Q7|D|^hDR}u3n37Z7pan) zV1h{8at{|&j=7UO6uPl-zh~{?gO`+F2aV6pKC1Vde^CDQ+spNexvIINjnA9oIF9n_ zE3B-p#>B+<(jo*RQTEIigu@cH9_L9U)o?<6rjs^tuzsxPaXCdTXBQVSNy)a78FiEv zLZFR?!wCbCTh3h!rX4JPsRztd0e@oFphsDz%9-sP^uJ}--lvii$V1Y?wSfymaHa56SAHRKz&ter{C@L>EUU^wt`&1rC03+Qg zgY-gdw)H)_z?gzJi4LxcL9z(*KMwrm+VmKM328xNsCHIVAulVnJ|0op%Rlk>nE2O z9qo74=f%+>f8SZFjSv%IK_O0^)zl0dgYRrC^0Bj5Ut#>Y^6{>oj&z%bxP-)gRv_`R z6weeAA0C!Z5AjAhW1cv54Oe>!u1sByBvWJW-MiMNTY345T$>rgRdkp|-TwC6X5Nu$oGnHln;=#+_UOfM zI5D5rs@k)_8E)!^h`FJSaAia{g^Y#PI9-G>nVmWnIU6{g>u1_JVEp<^ebB~gm&UDI z`6fUWV~y>NUgESOiltC&HMv=(B8h45VOWJEm-`Log3kjs@H92>VLuGSgB-CbSm(cU^BQ%fn zY-!jKsCJTmJI`&qh@pF~!-{Rg3US0RAl6lcM%-P|%7@c-owYT9aj z=Ptd^dX+7U6o`Z;!G!Jdfrg#eRyxJ4WqOfJDlvqyoji5wX~62G`JQhRo68ejYOC+= zo0s^!r5Fx_JaV&n_VdH(_t`O_0|x1mU!J<2SduLiZ>54lG#mLbivlc14}g*4A$AZ&+6_Qaa(`AeS~Ap^mB&hG9~qM}W=?`0bx1aRiWRHES>H4dw1aqcz1unp0N zWAN(9wg>xrGdB{YsVwblZJS|)$xIWOSIpkvmT|Fiek0fY?(=;l$Fhh#P>9r)huOX# zeBq%7Kj_U1ULlrwZY+HBXxI`~r$2aP$6R^CDEnAI8?G{`OXHyK+E+A5$j8@L;IUhO zIWQ#tmDRk1;Mv~$9k+|@FwrKqQmk-(tWXt#&HzqbNq!f&P&4hAB$~gzbR;eyM8*`T z9a6|lIBX&GN6$U-jR$55ek_?kwmMZ4TE&tP 
zS*^pm6rX=)*=k0YMGMiXk8#ETC#CJUzq^H^E{$MFX0&DSHf{lS2ld823?Lt1f6 z+)ASwEM8aMq>5ECI>Q~cfC&|%J;P)8LM*V%$JY{VFj7p6jMuk+lyu(=-HZusN`MPo z^wCJ6Uk@Cv3lO}N|5oqyEwUjqj|vW5>GDa~KRSuZATb!1#9`qLgo{a%%Xp)VWF-}M zl!;y*s2oc>Yu$YNVYYEH>?<3Z1JGAW)a1HSoPu~BT}yq1`8j?%|Z zpRVq#U-_wZ<;pyWI3B}wHBMc3l&^m8xXQhdMey=k9Qz9qlqcP7cX8E2rm+CQC+ONDlg|z|0|tU zLQE{_)srVrUdfDNHy7590-d%_zJej!9>UTek|fbJabYvt+T*#8SR zPLvtguroYE)laG~wqwwvac}&l^bMg=nXJ6Lk)9q{ibak06qiRGbdWSdCRTe7^JcFB zn{(E?LgNEXgTy4_b|Gh=@hCHyIA|>?xP}i~OSC=5+R8c=|5=yTb@O|+$M}U$Jp4>y zHn6}zXQ4W1W6#Qpw=B)>nWC8+?96 zx^dV0+O=cqtlQzoN-t<51P+0Hvy)XKu|23m%h9F;e)vSO0VSB2(L#^Qb8C%%W!!FV zm$a-#9WN~2^RpaZ{MxwtwsBPB#e(Z&y!1*tm&eStBA98FNM10eFG^Z#Bdm?u(GQs2 z%!knBn>P7={UPc#K0ZEpn@UC@A)&e@O+=OcgDg17P@e!SWt?&JOZUJSL}nEia6U-l&M z7AYyKz-=#neP%G?H7@R~v=Vn(Q&ZEy;jX(72;uxuMlJXv0ZDN*wD2)R2+e3#AHSDgu zo$ku(cONp~)(8n+|M{izLP$+yqNrmI5%HB)w@C*1a{`2|OYbtmv>*KRE^KbZ*Z2xX zZEp8BCh%5ORG4gpSwI?BfgbHHYIskQ*9WaH_Lo}?e0cCc{F{#!qK2g_lVDF$E3h&a;(`z`NM5Efp(L;)y)a>PLLR`wP{55O(-`m~gmmRi&YK6Ae)X_zUe z9(TUO4WT_(hF4`>5y?xeoT(UzT+ zMZvJ&tPl?v&- zW#o6j^>-yJO#olno@ z@5Z$Wz>Hefro_BFx7S>LQZF{+BH6(azIWv@>)_$D5M}2pS9IXQ0*lm9hMOk%Qape7 zeFih?1N`(Ub91V*B>dX?y8R(+M*8;10WvR9Kl}rcfq_A7b2P%vNK1c>iU$DxXV0G1 zQl-lp!qQ2IV<%ampBfs}35mCADrvKkWGQaxYDT%{M3}At?#H zo2__V_Cgs!L&WZ#cY3a>-}qJE*9ZX)6POWBXVB5#zep_UJ{Lz2XO z-vR+&>B>=#$k{a3FSDqExng~zz5t`rgXE>Bc`G4L0t@cIGzOp;j=Kd8 z;!iMPac>KEbP=7GLEe@@Qw6~Mj213M^vIFi<3Y#ZE&NDfbP@Xa@#D35WBT9*x3;#r zS-+^>Y+{g65U2H}PQk;?Ia=s{#hzRFeiX+DQ1c`mx%kLYdHJ;t!CMqd^L>?#D~zyt z3s6dQ-i!6t56~2nY{wpojg76Nnc2d+B-~}(u&}h0k2h@v{eaMam)cyabGMpuQSiqJ}Hw-OR& zLv$g7iTSiD=g*&S*!i|t_;CH#Kp#Q@*@-rQ*kewfBx%?E>bsEfCR{Vx?gYpdaK4|2 zN+;b>J`$sf<`*tJ4AMt*+F~wZdYbvMPW=3+L>fahNNIKIymqf!>B2K1Xa}`e6%CBy zovk)Y5@O%=<^_tE0KotFp#+u~T|{h9{@n);T!HpJUgUKI_I7ppL)vUhe{;W|4sANO zN~-IFC)anb4Ij}&Kx5^QRI;(AZM-doxu+2!Nr;F)=|HzbL0+|jmi`bG&+7O0+A)t* z7llSW0|El(=jTs4lsbL0$3UO5?mfGQK62y;xR}})kRJ$tGHNN-Wyg(&0S!#@2;CWc zoxt;HZ-b`3zTQNAYjUhL=3e^}+uZkLgpZ$}#`=K6c7%aoZEdZ$x3`Z7Mf|$ik>;EN 
zU;(DfJsNwTG&q>w*BW2Fdez$6+BSZ?@d3%>Z^l7h0m*QO`eNUV?@%-|aLX8I1O!e!vN^PuqBRS#8OOSK$ zGX*u!(W^$U^=w{VyTa-h@ayy0hYuh6{#H5REW)_FmVO%*&kK){;fCuwg16Mdd3gpa z(lS_wgBFhmFPMa^u-um-aki?@rlaWz99J$EhC*Z^DT(^PcQv^Aj3vVXe$+gvE2gso zk}!SjrwlGusA#~_sHTx+LNoM8$R{JUIfQOXUO;CDw*QQRRNk!#@<)2E*o?o zrw9>nFR^_H0ZnEf+lh~N@7m}smjBAoSKnP~Ex8jo`|7Yw>R1Up_*A$WB4-A z{l)#6ySMg#I%&9hc=WrEDA&GxspvjLJ`%dpDY7v?DB?@Kbig)TJg8|8*Lc3Wy9-#L zoBMR_rbQ4fRBD9vPzKCrJ^$%bd1-0pSEP3;*SF=d?I&R;-dH%UBy%1VHb>-P-BSs0 za;m(7LT9^v0s#iVol@Qb@aF?^NX?zhQ-U9V>tzCfM9j?0FP=W_*ovLL6SQ!p-z?f$g0gJNCLlyA+VOQ_~-E_W1dFraw$41tKbzq~O-q zAt{OJopLBAhx1RNk!~0{dc0u17>eHEeSxqn@L_A8OkO;D_QM*jCC5u^SXfe`r<&6X z9n68pqw*zV{k3Vgf4#je?Vtw|jBbtNAZC0jHrlxzx^>%ku8&8}-}G}6!|qOGRBCFf z+OJ-7X0@dz5Q+i7m0nzD4GapBwyItGQIa=Xf(gP4+q$$X&46fltWF@A$REqZi(Xrv zS-JOQqif?x!hN6Jj^MdRFK~SMj@-h~aAOa4Uct3rzt~t=m%dp=CgQnekoI&;&B#D) z+F*NsfA-gYYuho*e8W#&K1o#6g*((+zR8qzXnzv2zm<0o@cF^%n}?;-afyJ_uoW1n zOp`cUcyspWPu4JqC;8#oiWMm1>#(i*&GDN{M{9Pb)ej!JLmTJ~$F5rM8j%T0@kqSr$<0Hn5y>ZMY;*PBJZ|5nXvew7#+gD*262pt^CE5cDk~G6 z1c82cbML;X>^<4{8wu2d%7qstpXzM7W{ZO z8lFnc>-lT;b91w%#Nj*nOro(Xz(}`VgzhM=anxdy^+9~-mTg>lBk}(IdoD%yHj@@X zz#p7*Xl(^TXlG52<-6kLB^x4A`wK9L5F_jtid!_-Z7Eo7*VIT#Nx8be8=yX!DgWT% zL$m3{y48fWA!{m)*u1@;PBk?(b+X|o40To{J>@AqQ3;6)FWh-d3~($y0|5C0mdzRX z$O{e`(car@Kh9bQ2&t-aU0@T>1@A%_weT!?NTZp31|my@h6-Q4d|6k=eSBkgYgGfd zgNF;t6t}c7baj*C)Y-GzVm><{Rg{xQ_EVz8wNV$O-~C6Aj);o-uYJ_B37BH@nU!If zA7dc~1?_x)pDiG3n)&MH;nnjr0ytr3jC=A7t;#s5M5qn|FRZQQ%ndwqrVo(FfM*5l z$6!xSVJk^V)7+fJ@Lo|7+(r=($3%G8yda^pB(+mKGbh!$(JX35#>4RY}L_>qG7-kdpY(@mF zrEl4dQ-dc^)p}dg>oAx$jbxTt=IipQWdSELk}@nCj#!dM$lyKG7e+J#%~oPitaO#8;`^K%0X~MmS@JTU7si7OML0 z!-qaNAOvWQTCVy}hB4hq*Vnjl^CnPkc|dzJX{Ib;c?0qzN`!jiqNAe=3$d{*T4eKX zT;0vN-%D&K<>hs41YopsqJ+m>EJOc%6l5>KC0T$T7Xe$~@#C8rb%~XCaCF2;NZgd3 z!bU0~jIh+68^2$7bSY}Z%T$XT(TzJvb@>Aep{RHkaOw73S%f#E<5WCqN0KhF zvau~k)f$peSLkdd6s5o=g&W| zdXX0%4!dskLrG=A7~H1MRtMm$XQp38l?!p#qMvG{*6N#F+V#~f0dCoZBi(k*F_R_P0$}vTH>r&~FB?Dkw3D(vs$eG+Md5pMj4ChP 
zEkA7dU5G#G^kEWtO`0nHXoGppNBD;r4;CAVahaek@#W8Ru*hntQ46&ve|LsLS!(08 zdCml3KwxEkV6cDGxz@A@iBC&Qo6!Mj+vPPW?+C5#w9T1*9#fhr1WgEME+GlW7!yw4HdqLU%c7%7z7!C|+p zcLI#A!oBR1_J;ytZBy`w-M5C_7ZGKA&f!I9?`IDmKRzjSb5t)aJNv@Pli_(R(k^JH z0w6D=7O|5LFHLqxG3x;x?o^I8bpWvO6Lm7|xwuICYfa*fp3k38gG|gWZG<<(pWNTq zxAKw8o&|W#Zd^{w69vH+<6O6^rsN}XuB68Za&mIktu}g6=oDjlO*xva*zv)x_wU*6 zrr8KDR`FM9rFcTKBF+5^F2TCw*?ahzPmeS6$)$v537i>~-%NRc^td|F9{=N0iCef* zjOE4Fu4kpCh%;e%vLW@r)jXxMr?_kIV(gYe#N9`6kR!}Swp3s#e;ZD0*qzp$3rz7D zY8Pjn)LZYKX=%Py0Q_Bbb;cRZvZiA{VDiWkedqTb5!aTeYU~(eP6cygZ#$F|)S*@EVV>&Gvz=o#cF?TQ+qx zX46$y3d!YntfSJnl2-W(>EJ?UfM;8%kq;(oi5*mfMnb-ybA}tWs5Xg%0@6oMTQ+A^ z1qB7`V`j;w1Hx7$VLLwVSHR{?kLDK@;ocx4y3s)pY3OU9%T3OgKk%L9_ZLTy`=_Wzl)6F!yhYH~8fuy?`btQDQByBD| z^mAh`FE1;rT;aOr=T{U)IJ(Uk!oSG^6Y;i1X-gK3)z}6Fv_z!wvj+M zps2U$d-}|o$GOFPms6xj?j{a>KqtCxQ4ZG4q-97K}j*}va=$M$PFnTJJ+>{p`Hh=`2 zVXbfX*z5DSJ|$uEF*0b^By{)ng>@bVZv-;qp@RO&bWn;@atjGj=afssGXB2b;6L{p z29QimyP1WB1=z$5>3pqQ?7ez&>;fmIx^X?R3kwVQd!tdeSfjw(?G>mJ4B3#hdc`Ny z|h#T2rB{`@(ZEH6ZP zF|Z}DvO+fwky;3_yQ*psFg->_#w-Ih%3T9X2^U7|Iji@t-aImh+qYbX367YD32R1z zU2--mG*0%)TSeK;ycS3%hq zFr$288@dgbUVff2B{3(L?-*8H%uNo9*y{R_9!!`uc}`U81TVn~2v&Xn{=KiaSD(p? z2i$l*QHk0Jn*d26DF$y1@5FJp*)hDI4#a)RV6zaWJW=@R^Jk$&2H-cxtuo77|MfCk z3|Ko_=;v;wS+bu|#u7LPoX~a@>|hTZEH33xCv%ecwEpX@k-hqnPnLv-5_ncP$b4o?cV#4| zZE499Ek4rah3I(vx8if<=JUuJH-D+qwR#llj(H{ps>Ce4mSQ~ye z(jFB(nki5THXpiwHuOqe_f26S5Iz6n2E;-YyOl`QnCOH@40jLUB=Zm0R2qHgL*2+I4-uRgN5-ao^-eR84^s*m`--e$J$)}B0OQoHQ`wCU}$qHXz^|6 zwQGBw#|Xj~jgHX;34m971I#BXDmqHVbL{UW`ZrzzOaTD$tqC_$;xirgrTS971eH>B}sUUb-03qyQne=Pt%T6(@By~sdj6L{C@Q~CWf&|AfL74UFfxA zSW|f9y@zM|^DYNDo|hUw$=(uHT_-7DSyv{aY4`re!@Pj$$LJ7`w`n|w!`~{oV|e=8 z!&P>s<`oa;)Y(NIHXaHdZ4wC+@Dvo<-G6yD{0)Illm5haZN~m3%}vYT?andTZ*s|! 
zquWtYQK_-j0CC{U_G~DTFts#ucQ-ii#AfH4H*&^ixYsyvaAse`J9HJkxNghbdIuR# zQtbFa2S?B*lWo1c=*2wPiQz4wLm_(7Hxo~XDwM`$v=tb#D(1 z<+*1g=va9h9T=rwY^!WGjUL$NYM!ay8W&YZd-$Dr5+kbL(TV3(dlq%|N1SKa zSZKVvsHl|>)kpjyEo8s=^>SI}?cjA7E{KjJ+?64|xam@fymzMHn|dQ0hVl-*zwcPG zeLt#U1dt?%lu>!(U%#%g_|e>ZNcLB!y=o zN@aLN_g*VA9UWb;)KU`_piA|!EKh*Nn0a zaX#dg3fetpXI+g@AU;|`a&P?Jiy^+;d0Tac7NnRHFHOYPPQZ8hLA`V?N8SZ94mmKE zp+81SUasGyl?rF$EWt7)yI{l)tFPwmn+Wojq8rX}cwoXe+v;~T#-8vdL}*|@hT^=E zC47nEBe&W9Kqe5B@-0uukWSCkq3mlhVaW+2b@l$s<4xvh8W7=|OLO?+6VIKGXbV03 z>h?NQ#m`Zr@P>_OP&hDWLg17qr>703o`CflAb-{~xp?v3fm1Loh1uAXK!evR;#vH( zQplJhZ!HC_OD>EU4aN45J)b9wc4v*8aLyk=SzX0z9G*33q*r4XKi5}%Da@oS%<6f| zX*bKN_9@k7=$Y9(5HI4szR)k5`o!xT)`eccNlSO_r;!~g3!5t2{a!d3V(nh9yf@=( z1&#Xh@TKqMebb1FH* z@F`@1PQO0Q8@S3_d_3dy92qrI%}Oe5H7X{|Hh6vWfp@cX)+ z($dmTj7OP*e0cR_jK=QJbhlV?}Y9eVZClmf85@buhTYC zG)+5&{6Ue)>fw*~WV`1djiO!QrqrH@zdJlfz5&jTr7NZ^2v+B8Q<6t2#(rj?fQBK? z%r~H2&J1RRb&uBd7%^AP!3c&ALa#eo{6CLvmlf%jzy80Fpk2(b>Fe5du2>#TS`MNh zNt~qp_q}hlP62^|Rso$qhwbHW2iA}L8>O8w#{x>nzJ9;?V=J0zEx}A4Rti&!q+A-1 zgd~CT44T@UB5@#o(6p2lnf)6=CY6T?+ob>>xA-+cVQ=_-Sh;MiF61;MhVnV^P+C+e zc)viQEky{FwKU@Z^8}JafKD$*Pw#p#Q!wb0)z)^s;wc(7Pl;~a&y%tYmFY)a~F&vI@W6J1U}BdiPSz#zHqDvSNSsglT4ER$J#-RsCLG zpv=trD}9yRbiBh^AQHAF0dP4aA;2sjv1zc36Tkpd5j%1O@VlpN$Z>y#|3{^bT8w_9 zzDhoRy6z}&v`ZO;)f!%Hv4t!d%@0I98mo&AZ z7Pb?4@b>xWLoOSQrR=Mi6=P}ZZ!!p#2>T4e*fLkN^=^g2`FnozbvGsf#4sJ5I&(Un zRmCc7y__Ov&AKMs?A#psT$TN+wI82!SXES15_nXFy-5DH7`$M(;W)S}O*F&|bOB-< zASHXwEgJNl_!K--qCeg03PhE_z?qTLh$P<=JiFd`AHITfh2>o*&gX@Yu-E zemW+;lGYKM&rpn_H#EaMfgLs>_)oQ+ZWX|t^8Fz>@i|{UHiIP5Bv9)yyioo9Q^Nbh zdUhD;oNBQ<%R7tPY7+)Q*Qbw77SpA2aQ+(K%y9k*L`+7t#oJ-X5DQo^jP=l=8y4)# zV&=vC$a^F_KkX-QE=z70TAv0h%6z06+>HPu{b4CWh_y9uF=%EwgJqlU~WRaq; zK($VYWt~*y45uR#`SprHGZUWJsIiI1WCET%<3pB$0T%Sil4SYiLicw&Q$!r-;_)Xz z8YCe{@tzH=S%k+)PtakGPEK=mrTIne9k~Ec`;XUSYZ`Z|r z=5Lp6vj_c0qEu#hJ60AKh%Ux_Gg35);$HdNn{R8S=pO@gMCk@Uh!wuq98C=!CXGQ> zS7(d4!x)yvQi_%XxsnByIOIroqyq5y4)ef=QS3slQMsanEI7Rjp3 
zLDuU2%%}u^BMBS<1C{wEoX{+TB#|>B4X^Xcx??J#{KC*9cqVR`ZEiOibfYRl8Nr^IjrP^ID8jWb;gbG&$Q z%&+2!U_>&fpkH#20tkedxHo`S$po3B13h;ZnH~X`hsiUA4bk`rfx4 z&iQhLkJV?;1T?n3Nt5vVIj}qy!)L$!)vZE+cFPsMj1>;+xWOjz{aq%Dzp)l7Oa}X@ z#3bedo(HsfKyV+I1;fQ|Tp4NLDMa<*TJEHfq=0&sfKvJW`^Y(HGF2N_WRoKJ%8+gn_luoya9j^d{7h=7$|?7$vd&psf*Vud{f2{MwaL}j_SE77<+#R7iNf?Qb!7! zb8F$5E(tsi-tsP^LzA^iiMC>dQASY)l%^BZ3a^e=3U4X%+*8rRUCA|l{{v)pKmuo(GRkU9g%B=F=ZP3*-< zpKxLBd&?`2KpNKQHN#mp9>t<4z@gIW!jQtw%^f@*D?$)lGPw@AHNAbcy=7hvg@r-Q zpgT=$WuuC;S8W-J$?_T49@`r;TCBDhP)R%Uk7!Sy4;@u)J{>0Y-pt-JCS)Y-*J@2{qAY?3lJLr6qfPT#fku|iRdXq-cVQe>I_xi}=i*Fac zy#y7cwMQP;>6JS|C*l=Nb#bc-t-_T7~+*@8sGO<(HNzU_YeTkr(PF}`y*ER3FJZpDGd?O#B@uyP*5 zYR-{sj-&M(!`e(M8k?h3sb=WP__1Gn$#&SS~R2Z52*PcsEq8jbOe-GZ+L0J^4Epzll~{iH>MQ(@-$W@{G}$Cok^Lzwzif}nt@^Uv0zGO77Q?OWX@c7R6|BQy6^()1O_tf zF?zRnb$a~ieA4p-^%D4{gk;msg@e?D;%SJ5rKO@X&+@9)L&d(?2&bl&b7Tuw1$6BL ztov5t=qQU#!-jgmhN9d(k{x8`D;me^)>LAftC@+AiLUe*Cu!SK#ra)p<8j8sjso5x zTROqD)ll*>g)zyr4B)vHJEZ;%d40IK67;+A`mK{uIBaFu z!h-up1R9YEn*{!MVWbf(f{1KVwEjHVX>eOA%Fwp?`p&w*Icw{3Uda!KF3MhJu?2V zDZ?=mx8am4V*EGTPn{$^eU#w^9!_L!V%2Ue-;u&FFS1nB5HG_(sSSXoLRon10{&ZmVwFxs;5fPE0rmIs;k+ zMLWZ>y4;1M64Rj%{Vh^ZVNI5;xWtkE{uiKukigFf8j$pQz(!0N)Zu0sau?~|uvh%l z8Y(2}8-r;ZN^6cM^+#&kwkdby?)5asL}CY$h5$Nz#jnch$gSl>qQ{hxx0{(&0RR8+ zp}{9;+t&jx#|Qe3D;=kziISTiNyZEq81FL$)x1J#NP`o>>e7Dof{(K1x+mg zZSgk=v&hc_V;Wi2EPrXCcLMj^+Qm{zHNHe!5BXcdasvl;a47TVq|1_R7-*ch45=l$Dq3!b3)@ zi5Bp5TWaOI`F);yD%`N){8&hvK;gs-#ML%lLv+|5!iB5zZz(jjZNNbqm~2wjK`e=hPrYz`48*!myDq>QWVw) zmoEAbmDkL)M#Rb>T_ep$Lqp4`lyNETZ(s1UD7G(DmxnPi@Hm}0b7((1R)V1Shhd!;qKPJ&j^O<>Q9oF{JX9QH9LJCxQdd^LH(@ZKuG7ah;3HHzGjx z%ICVoxan-!pP2Fz)Jy>H_!!Ibe`WY$gp=?vn?ANffHiV_Of2$~5s%>81(gNTO`-jq^2!aAn^;wJmAyP`otCvjR z?`M)9BZ3kRXfX`-v*}}6XNatE`F2Y9KlsLfCugt%PowI}BqV&$o2c%_9nxLma=8%a# zrl75F=wr#Hy-|j|CD5;c(mXh#Zsxz+zOqivGsH#gVHx(uksyE*9)yNg=?rjUQr2aqoP$@RK%+`>I?Yg;G` zVIPA))k>zHl-s+bcxo_QJ*5WU7FjD!W-iQ2hZM*a{F7xAC`a-9OI-j`L?fuOB3mP` z|IwG`ECJjZ^~PUsx@WE!`A2|0Jkaxk4VT&i;|c6Yr 
zUpDuz&iu}d7yN8}N>YjKdlW{(=Bw(E|5n0T_gei?-}r(8fwgDJd>x0M+srq( zgkY%UmPaF)$20KBG~zkJV+n0ZSqngj+AcO9mTU>zU#5LFoJj3-BdEx zC;iEtVpDlDZ=lXDk%^RRRI3UV}H3=AMTy@0yl+v_bkHb7t1z};kaW(Xq3yd0GGCBGZ{G>w;Z2XZSYh?@+PAKFF2%_jYiRC9X`ad6P^~Y$ zI=%Zy!~E#Fv!vX$5NwruiTO@iNaZDR#K5j{z4xCW*seb~QBG_Nw9>AW>$yk! zV|MlpX!$3c*G@r1{0Nk601ETpUSG(Y?Y|lEVgn|lge0)6FqOsg=X=l+ocIBjL*GW^;ZqR-jyA~ykj-HvT z+NZdm+FaxLv)jOonCw@lC!g6k6wQ0R#eo)F0Nc>bV_&y@8L2AjrP{C@-kG_Z8hzW#4}d4;1$`T2yibf#=lp|IV7tN)2n z-xL5^Y)3!G>)bq41G3VbM96=WTe8NT$mK@vm^mdgY3Nc47PGeOw!RnEM#KP}v$H6?>e5S4@&$S3eB}*R|>H#XR1TdHL4F zFIA}mCXfaJ>{D!pH^4VC=Im!vY6%|0r~ zQR<0BoS8D0uF+Y3^StgT{yOH+e^dDs?i3m09}}YAMn?EHd>QSUJGl^9`kFl7ejvPzs#s zInwEu*fhuKO+3FcA>8Q~t780BqajIdg9x%RwojAU1Z=xX{C^l=(>$f5<}_RrN0Qpw z7^(MOY_KCF?zXHy)V}*cO4oXm5f`%ogKq3}X_&eZpP-?`Hn= zxXnO_g1#Wei&=30Iv7Pz^0%U@8wy;FVmULSv0&7*zszWo-IjUOX_6`SoaC2hgUXb5*#*yP~k@LnEn{8EBra`kYVp#N9gAt_4$ zQP;P(w}B-4!Ejfymwe^h!QKaXV5h;Mo{^XE@VGl~&&S`@1EY*RDM`zl#e(nPd}A zpH9n3$x!kd@4at+-H(WSi^FVp8~Ue=OA4dPgDJ{nn6QrdGc~)A47m zA~EJ$Rq5CY+k6{Lit07k2d6rXc#b$@yWNy4+OVkLZ28D`g@uZA3bfG+aOY?{SU+r1I6S^7)}F*s&b8wgDJItqlVqG!C5JP5sML?j>vX< zTG~lK?rqIYy?ih>M=H2%K_w@@pU_EQ9ps|tfo zRqJ!-Vi5`NsHmute;|JNXu;T>+qT5`*~TOIQvu&%XV}*yktW zo`R?=2cu!Ps2)!aio6F9pt-sE=g-SVMLc3xUthd_{dk3YBEQAaHqFcXIhFpVHVf{R z4XVqTgl=C(?pmP{b?v(^7Vq;cH2hei5-v|^_kjO;N<4M?v=JCi1Yc1A=3o^TUKSBi zRDYeQq`Lo9onQE`Z?RoCjGvXm_QyFkE0=+Z?~{{YV9pI&KYATw=jnPJ=-XcG@SWZ$ zg$acv=4W88zpSiGh8s*P0KWFl@rc2!(jRRa`Q$eoK>Nso^RC$DVfsP5M`~VPUjB>0wy^hkEQk2&f zZ!fgDVk+A~7YiY$zT680gU97Qf!2bEi9plsF_#nzkZ^$#=-eLv3dq%+iiw_pGVpX1 zKkRb(awM(f`KL4B-90oDqrfu2S7iWtrQq-fli#7XX*}X~S0K=<+zdn5h-!m0!Lwr? 
zfO53O$`x3=f9cqHZ2QO>6R5XQGH-op|2zVF&Uq7@y;W%83lU$t_tn(<$t{pks55C@ z*?Y3%XqTG#*|Q~Xen=1kC3$;qzvL=gOz@?sO<2S+rruEYcCjJ+z(o;FUcFD0XA$AJnDU$ zq7zl_n6Q3@oWLhj@l{W+TL_Znd2_@sF8rozm zMMVyj`-<51_KN^(fDFF6Tvk4C2M{3_!*xkujW=_`&^RYW&>bji`yQ7G=1r#aLajM% zvG~@|@0m^sd>Z9>w=Mstf}b&j>3s(l3CtQKNiY2lM1pocZ4Hz8dwwNO{o27TFpZ8gs9m9(^z-a%kwKaasn{um{hjugxPsYc=2aQ%A;K+tME+>0 zOn&9ShHEfQ@7=!-C0GC4=acOu;x2)NpB!#c-5?@DUfhYE$HhPafs6x>x^Oy#GUR|K zW~e#d`P0H1TEnO3*m%zAk7FF)ss#EFJ8qSL;`_K8Eu9p(JQtb);w`lOD zMuK&8#ji2#*f;l2K21!A#}yPd$>bmzP&c96#erE0&#H=*;axuy-z433N$q%?J%6e| zWO{nKvE*5w@eehC^Pf3=+VrjO@yqKZ|6O4l(L?=Vp!M+M-lB_T{S2deqS{1Zj71JU z8Eck@x2dZv0WfLlas#U~;P7km9dJnq^(Nx(ltj_ma`;gtP&L7x1mmW^FrC z8&YxQiVBa6qVqIDe00O?@}WX2tW3k_TpAaZ3naq&&{M9iB?s$Q0^V8HJsv{E&sysY zXMvCfW)wJvy)`id_2S=r*9AmqQ3gGAn%whm-BM6cctH_MQ3JY;oh-FVzHmWSoBRYJEjglMa^Cs=pyT9+-6yLB3dYSXnPEmbZn}l@NrkG`J(dVO zJu&^bYlfe%Zvzo% zt~{K`{d^o897;(t+T@<|*QV;T^# z>wVt0us7k?!zrBd>oeC+CV|PZyE`|PDfIx7l%lHJuer;M~ctd?H zEa5u+o!xn``_8<3XeM@RI44_a%a--)N5{u|&L?n72vEerAXDnThy>$g>rLQwFhLlN zPIO#}RntO$agdSGx?TF|~&W3oQ9vXjZOdvByYXcX*-Fdh?zkfBn{aoGgQ3qT7Q~N&88_*$Mo9$NXx#x!p z+R&z!{h!V^O+4TFcXJtlvwJl36+;UpHsD{k*hgul96$aMsLS#qIOhn#Fy39ACM+-n~F^(M6%LdMG!1e0$`? 
z-0*75k%7zoAr@Y^Jf6m+zHvJR#{Qe=cm9S5iB)cn&)qk4K1Hu^80{^4IqZT$AExSsQu!>ro(;-PH)LQ=1A<*&0OFMBoOkL-;GW#MV~1y zFE1}B;L*ENdgL+$0^76oSAl3L#Z7zwlOvf<$yE=L@L1|l2_H@cB9J3PLXX!;=r0ZO z>2_RA>-1e2zAqdy7&3b+zUD5b@9deA9UBE)$SS6}9v%0Gg!uH-*Nd~@_`JgB4Ac6s zsL;-{uV&57DJx!|^jD*g>*6jIKj0&Ij<4+5c&eO(iM*fdHV-any`1V2A2f65tMmop zGlLN@Em8%9zSX@RwbN6_8t%iCmUrDbtUuK%16e3(S2cEYt^=?{=$qz}d79ET z@f||KE@UnL$+Qg0XKmm3S6Omz&CfA9LvJ+WcoiZ%>_Zx^((R;Tbir!Xel>7;W#Zzj zpyIBPes|G@@v7$zPZl>y$ZfH$!9ON`>d4|wEMHw&#JBV(J&NYr9V*W|^B$Q7cWF6P z{Z;>Zxf?2c<#`3=%&$yT(GNlam<<%-YEfKVT!MqEOc=XQ%AgTpI?GdfDQaw`n_Ew; zvLidq#UeEx$zVI1F4m2Qx2IlQ%ll9@qctCFumgNNCPazD18>j^#^^ZM7+ zcE%%!Xl~oK?Ub}N#*rThV5|03`74`2?J-vU9a3cs)!zLJ-DN<`QZxVQ@&unoOuX>o!a`@IZ;hKBBGmrauy#BdPoDn6Yh9@^ z+xic!N?BUq+C({bk^dg|gvr{Mu@j$(6MVzKE(6p9aYBV#4LxqRELYVn>J~iNVR^n# zRl>{r!TQ{yXee#ozA?QM__RhlrCJ+yE7zcRcRuuyV`pbCs-AkXF#TRf!^a-Hhr&XvDt)kL-up)5PHziQ{GiwQ6;U6d zW%pz>HHBMjnm9Q*Ju@zZc!0x>Hp9s!pN!Q=QDSIfvPy|A$B+BJX&a-_^HiKm5zsz~;?cw(K0ae@tn5dU_DIX`Xm( zKr{RD^@ul2WfGdUbZ&%7v(AR$-sr0<%d`C#Kc#DRewZEaU!8iY zeDe75Yh@9@4L{$2y4v_92B*MYe=Jw3DIA4F3_QYBs%#bX`$yFG8~AS?2OJT@>r zIf3h`y3&BkPu^XSpWoWn29yb*NcO0D>4^78D}I#wA?r3KI{V-I7*nbXwc-{=xArXQ z@1pE_N}|auS7xY?_8vO)kjNro;j;^4evq?U;7ihOi*e(F;fRG4Fn~L+H`AM`;R+Ue z-n9yL1O2%D1?bv$(4y{$Yn8kFV%bEQOlay zF7jQ(k+Vn7%9k;pQAkc&J!c6Se1uNy24% ze{Oa8rg_#&Q7L$B#%kE%q}?zomOc6*;8#x~z1fJu=kL4rC_VFyffCBZ#Dox_R)80B zL2K%xz>A+Wa{@%#x1MNEel^vx_D&JGiL2Co3^2qA&*}@N$ge#XPT7|6UJk$PX#qPT ze^`@%V@c`gFh}Ok@{CK2ZvM9a$ZC{PG;bT*egakPyJJ3Em_*Dif`DfTW{MiT@w47QieBwl{Dd{9YKAkOGs)&UxC}-w3z@%=MZEriPC$FO5_JcW z+)fr!y+Upu9;+JP#d%^$634?NzY=Ow*+H$pjcS%oMo2pw!r1}PuVgb{(^5ZcBm?C{ zsv;au0AFr`WE|URz(t|x)d|8k{L6%Wsgu97W68{9M}Z^)XlaR);9EI|y`*u5*Gjw$ zQ%II5r8v`@5;4iUHr+iqC9B{q#YIS^iQGLVbkQ`7FVTsARJd;XuDf-00G^YpkMmvc z={d_Kt8wEG*vDh_Gc3|tb{w$o+>EB)yCs)3C2Hd+YRyEjqc?(O4|zDb*G?VMlK5V6 z-Kq}`WU}ZuaPGuFRjnrk@+EwJI=dR=C>!?_V)X&uqss$5AR#Hn2_*b6J|}r%`r=lKNb4%4;%T_)+Ls6C(oYb5!#}* z5Nvu;**57&D?^Wr$#a4VSFYR?-FWC1W{Ugu+ImnwSPu=RED|X)&io7_%4Zvp 
z)&2q;;3Fl6_4LLw7RfX6^EgbqA?1XIrlyP%JJh->#$Mk(gm5{%C#PCOgDFQ%B19Tk z)HXzN3)^84j%dWy(N+{0a3<)oYg?F2u=ys-l(UprG*3jV&m9zRJ*Llt&}Ni)`usCI zFkE~9!6fTvT5mYK^Qe~)5y8|HO2yDxX)$lVk~}fk<-QL6GrJSUWzS&T`J?pOA!Dt1 zk~or2UzHrfc@=3T2_pBEbHkPuZR+*iVjhu@gYVp^1^`@Rx1kIR@hNmkXT-KK=pBh!J~)~^TnB9ad85A(NE${#k^)H zPVkB522FEAbY%QV&K5WA`7j>VDS?-3+t)cm5SSWx45URGni6yWoNC`_xrfNCuM^Og zQ$5*n1w7S4Ae&ov6;!LVC4etr&Z(=bGrB*Ub%4K6Gk(^6d12ZCYd9&<()j3JOdGTpBiz=r$6G#sWh1$_E{1>Vq0H5rkA~afQ2RT1Bh12{4`) z4lZy{VgMlh3`Bb~kb0CS8Wacd2C$F2pw3*AFOmGS=Z9Z-XViJNc*}5)pP$UWTv+#7 zd8vKQZ)5ks7T%C4eRt6S=h=#LvmKpaWl1R1i#VO*TrBJ!-y!PEpW5Gl_T%s;@5NI9 z>b(#2Z7qpDt|qh;wcEr*Jw@ofJ(9s#18xO^2mpzc%b^1kMqL?tYxWGpy*_<9QBlbn zJ+R>-gGf(L7xL*E76jD;?6*DV$j81-a);E)kRSzRlJCgcZQN^Z zy?pZA3|x{u(w)RZbHBl$QLwk*LFWh_dw3d3uObiTTtdjhaPbH49Ev@L5O+Jb9wOU^ zu{ZoCQ7Yw>4DkR4JcKyQZ~Q|a*yRfYoZ0a^`M=`AkptmW&Yni%1fG`}9t*=`v$G-X za=Hb~Pw*P79J|3!YYy_r13c!0KB$+=qmnZn{ujYkz+EH@RD~|eDDHVb(cQbGLkJCT8&{&o} zD_{Ux+e?UVc0rsV@s2pyTeA@E0oj;L`JbS`=oA}!BPQFb=31n~$?x5k%VSKx?&QaL zN0f&lN(2=YY3}ST9XAw45Fgc{Kjel)Sn05?}#b@b3ml<@Cc;9ue=u<#%5B=!P>AA>P zlK!sY3Ct5UJmcj&Mww?@7`V-dsaAb5=NYnI0LOMz1hLH!fO!6G@bcT%$wyg!A>ln* zjj8YrpkJ1I?CtFVqJi>isGbKZo(=*dPk9?3;cqxIi_B!nPWoWjf4=?x%|)Vd$PgS+ zhOD-#YOnwLr>DZ_Ja3mxfnB0vrw8dQ;9ID~0t*Ui)s+u;y>AIK@>G$*s)KX7koll5 zp+%}Rx-HLD-u(!C(qzogj;gQ+(>5Y6(Vm5~JWo42J7M27QUsm(?J-qXA@oTN0uU#> z9e`HB&X`fWeiD;e`!^5^JZ69S7zSi z4}#=5ix-o$mpgz)Wcl6d!lmcEU*9`IZVd-iHP!%7PNxY93qRUad>Y~r)8WZ!YlGW< z*I#>a(w8WfJ2upGS)S8Lx-7pnBmn!%YVLEaGCL%kI|5(uW|~Np(7i?ddo(%^FIMMmSjDR)#%yv}&3v!``RInZ( zX3x#8H?xemZu8_#4Ec(V6h0q-<5G>-Vmn*uFFS!+L(TQnDGL_gTyC5pf7E~JD+u?( z;-*)wo_DNdW%}<1G-WVYwf@D26ajt%;0S6}T$@0she|9EIa%(69oiNfWI#0pxZ#g4 z49|B5lclF9v7W~vNIi|u6Bw@s@8Bi0rSP}$>ex&QMfwYS=*3nfZ zaq_HkGgOn#A;y*T){!dNw{Js?&axPrQyE>DOsJIFX2%C+z6w%)Vj1~f+{(g&Ttgrp zLS9s8ZCNvNmwHvPo$ot6Cr|TmUK)v`1P`hDKjwtPdMwx|^>@Pw{r#OhN zr3S!6j&NU}1W+F9{)-d9iEJ~I?v7T|8j)S;gJ8>>rSrl&Wx$w&%wcEGN4CV81CiH!XxqwhupjF+8FWL{3LlM5CSLgMY9_hcf ztX`UwT=gU-lF6)h8}i7IL$?3hIMw&=@q7Pn*y57-EJ}Z?yIA*t0t5?+ 
z-T8AuXWKeeRb^gud#O6hk5O}sdA`bs5-CNZuse;%DauIDNj{RNDN12FJWq2?Q)u^$ zA{RqKHTDCyQR4JX4g1G(huvTQDE{?TI+KF@Jjb_dv;c{GBVrA(Gv3M1ueWlf1{&_e zBjPnUk#FWfBfqK98DHjDgr0-W7FK4lnh+2S?E^A`h-u)Z!@r1!TCxmunqidt)3&y2 zHpvH?PCS}@>nSt;PT-XbHeOiEnLh^I!37i0Eq z-Eih}=(zv6J+3MWoy5k02QnyZ{OHbJ|9ktDsS80`C>RR-naC|MB#N|7Iki_v8}oI84|XMp%Xu2@`OX=bYqi#Z&Ca^Z?KEH zhlSPg7r+O)EUu%D=M^?X?RRFBqtK07`cmh;2(d?YotQG}&BQ16mVIV{w`3r(>zDL* z-8Z*{n?R<)p+9Q=Q4#3q16vK_fLjXsY43-XkLfhi;O&Q{?}S#0iAia9awD}ADua9E zu5ACXX>ugGGgDO^T=eOv7{P{3rwBq$_QGxZ8=CbGV7 zZ27%E(?4QE-$*B4*4#DQCBYZmFAGnJ{EULor@`{`#Q$8l3EscsKEHXrY^K&<@rk!6 zJ>-2z;r$1P1z0vN6o z!IT#=9MdU0{JA|m$dR{hX#MlP#t6SBY16y$75yg4pag^k@4C=j7k%|uDxHMq={5qK zlSEN&g!dkVxF^V&ZhUiTJeYiEMZ`B&W>=h<-eNDEF_h zi21fA?1D+7fL*hnb8&$Go$7L5I8KQ)Cr_k#oIwJQ_A0}d1w8;Sv?S#-n#%DJJ_QVwgjy)ioYLnNb`{vBr!`&Vr5q$1>i@~Fvl%cu74gyoFyA%KVxQ@uf(;v97 zVe6B%3ekc?c3t9} z$GKyqmTb|-Kt}wz*QRxn=&v5bt8anWv(a=3=u2DWsM2xP5Ty^Tg ztCuhN4=Lk}Z*2be%s+X4A9~a9DT>!D1I%6Dx7YV%>Aw>0wN;~H)MClMpCqQuB1ag9 z$)JqtzG3DF|GxNF^^zVuu%dJ_M6az@ixU4Mf*8sDzh7`iEwk^pn0VS!0_Mwee_m(l zP4Ji+@-G321M=80Ql#H^L?VdZ2pMsfMl>4jebLcrkiGB`h+I(ei&aI=I%@Ep!70*O zX*ujpazL{Gz38rZ`Ee(AXJ<%A=QrVkNX2B9zVVSXcc+4#) z{;A>spH8a>K28M%EnG}O7mJOntzezz1a?0HA1^1a5*7~ibR*H58iGgfIXZ8?t z`?pS3JP+l~lE!`naKY-YeU$u~Plkaj!Uwn*0jpbQU`ZIe#;0}F5B2tjnNor>+18kY z2vR{nEC=k*foT^nE-nvh0w46ZYRmfY-wPY1fF%lxLjI)52pVa%U5bT7MKD{QCE_y~ zW+2OzKjnbn1_vB7DfH;kqtPJME}!o}pz{hmq$Q7=q@^_>1ysoayqB$&%^LBbRMa(n z68glLdBUd1BSeJ9#aJ1oqFxWZwIA{ZBIm~D`w?&+MZDI!q2)h~Erk; zHiN8x?z28-v99;&+H;t*_PKXswmD`yb`tbGv*lPBypj@fm=j_D2bojVTI$-MhA>iB zBS?cA0Cza$%wX`%ma+S$cW`Kn(^;4*YO;ajhsU?7;iEkW;R*$Y=8iFeF)`EaG!3)YG=S$MCkQ4=o|}haBFKzky6?hRS_^@Do%f% zseeW*AOl&lQnCee^y^os4wr|-C!Zmj3zOx{7U7R#ElIZ9LVfbK=07z(YB~L?%j=_I zhi2>k90PwVF`D-7lq)C^lz?yxm2`tvB4B1I*1aaI~mgWn3(0y&j?b*C%iT;x417ois<{ikRX9g)LUX6NZNg2Fr&SaLpqc&Mvbbzhw&sS65dDZJ|(;z`Ab|>IG!l{>hp1cj}hz2(QjB(*J&6;#VUhBLhDO zG>siko@`Q3P!$rU!+O4d`2wx9o{Wy>kI0E;Qi3>Nw_C5aSQl+k=6-S!32cf&B@kQm z(IG|1ee$!p`9-NWH1QI&vXDd36`Ty!R#(PXSKdBwg|3GD^stlE+G7ACDgXSh$N&A< 
zw|-uJj7&~gw;d>)WFGdGH&6Y;uh*?FJj%A=_RfjbF0Rpk3OxG6G!?zK3IT^UWKWmu zKl)T5(xvAeRQ2uKhoZ|XDMgWEHovbuD?Y8Qu8zJ_Uxd*hMOw@0nWFw%XwW#+vGMsb zmK?tuey;y#eU=X8Ymm6OIF!-=Tmh|-AjumT{&#HRL_rU<2!-3PUG+50q-)sVkY}&a zEGg_#Li-Mo!Ye;O0yKnfAe;rXW`NB@b!uA+?@$klUheaS|I`#T0ZQG&q@vfXBZHX5 zPNJf;7jE+{d4ZSHPwdigG0-srgp#A;EohwcofnNVp$Bag7iWgBWm166eN8^Sk0#mU z4@UITwpHvWUQfupP(BP4cZd~OqGmU6=u#cGB%m_#E2#EqmcCSCuM5sf@acqplS%N3$mT8p69oPCvt?( zd>3f*!jUWAM$<9jS;FDs%9wRrDLlg<_dcGr^8>dUHyhhVgujW|);T5oQa>Ko%Iz7i@~xJdQ%adSpZS{Wz}Y z(E#9s;`n5YIx4FypH0WB?P8A`jZVp+r1^@83;E|jO2&?EGY4pyG~4GdnI{E_D|kG6 zK2S1<wHqh1a{x!39<7H=T)g>$VfzM@1(ba+n0R)R$9Y7 zew^`OeIB|*0IkT)Og@|_qj0?V=;`f4mO|gsj?gz>q`n6c9J6St9t+rh!$$SNGOm0h ze}7H2!$mfU7_WM0W%`0miqm^p+H-x)YeJ2`&W!r@}9^Dz?$`X+EV{i`VrRs{hEI=3H9)?DberW_i%` zrW-hRN*;^!inl<}B{s1VXs-#B46@g_q4CEkYGN~ENcYEp{uWk8vZ`t<$lf-JM4uE{ zlFxA>o)JzwBx^#iXl)Suz~V^0A;Ol*GQYAF-*Zh+Epiu&pk=8uJ_O}SWxEl(6`QPn zDQ1U=EE;92>1W0z4u(X`eiA+AiX&Mou=-%oFL@Gu>ex2-3la>})^6dR#6i@qJMxQCIx7g}IqYS_&ZO-64vpi?d*@M-PTg`z9l1NYrgJ_0^& zl*0=m%fb}Gx*|PmO5kL{;)%X-^+tZ{UN zk$@yM37jqzD0J8&#%jmMl2ZJcn0y~tr0VucP+DSLl4I)G285+ZV@O5xj5h^RQ)e9lrmAzs{{ik3aT z?y({<@dB}tXs3T}v23+VgiH1kGV}1^0t|y5U?gvxBr(~~J?IGW;7;~r0E_*PC{0JS z0+Cu?!pQY7;>RoK*Q}F}zj=N9V#pF*h#4k3GT)#G#0;-)+#qEe&yCln-RExob&SDh z89720n^2aBJg#&D$eNM1ojcCzoVa$9z185QGXIX~dO_`#gA0%q9IZr^APvreffGZs zg3OKVJN=aECOwhk;4*D&s-+P`tgDO-2ijuGjVzuODA+FYF;}4agSuI%Vr|>|h~X1~ z4E84e0>WdvI9sW9yfyiGTq}e$@Ux;6%x$a?QZfIC7#1p^igJWjYm9vQ9LMzpgcl$5 zh&nG_WBw$KL%LsA{tw7P6Y4Kn@;P#xckr7_Uv6~Zr~@N6p&cym<47Uy2|;Gf*91?+O!RG8P`}H| z%b<7G=Dh~5wD+v8e1n{mu1v4>ndmmS(L#IKi|grZoL2*eu+Y?X2$FX!>q3{IAtB^f zzj^&SPqy6@<$afDm$r`1ruhy-<^&;ob?Nhqh`GwoOpvTTLG=2kdi$N(`r4-V-R zbYv1&!?zKOhr2nio8myH4@Hm+)C5nO8Aio-G9`%d z{05Ck4>Dy$LxQZ)fO24Y@nvib+P9K!@Y~*V!WJOpVi0F+1zPh*RH*fUG;((ikQhuMsGadU$(Y@Kb?lU=Kc3ML%9Aua=3*|#FM;^oWm)>nJ<6XR0% z^OD^zkXNqi1w;3tXf+Ss*C7T54G>?~78(EVjdp;7%)8e7z#5&cf38*v$6`WPNNt7g zV;4e96Jd}~9J`20_($S6)j~HaFNGB!_)g1o$W1>qa&BEQ=P(2PNm}W%luD#zYuKCc zlqN;djp8w6yF;u?iR=9H9pg3Z>t3e*Wihxc-< 
zufoZ8<5t4J5Ao;N;(VDbqti%-Fq0Q=--VBIa;sNRJ(kaRuf=XpW|TcA>PracF8x$!TVK8nQTu*LZQ50^-D1 zj@Qt!gbP0!4!z*QCoYAHLSw;q_fPUk0})%$WV5icRe6|Llsr^n)k`Ixe;fScK@T*U zM7>kAWgtc;zoa@g^;(k?KO{;{)%1I>gaqQ9k>0a>tjV!(P-zmL+$Ct%bI*hm>TD!T z=;fZm>=Rjj6Hn1o_IzGj*l3n0b=0LCX!b}07u*y>wfQIC`kFuX~gp61SAH|51 zSbtrf;Q1>K(lSh?adlfL=-YI2K|ILI7}^@uO21_LUbEsbTfTOr{a=5;@H(?O$)V2P ztox9$2y!Hq137%KLIRZzfS~vf;RYD-z)#emq8k%eI**y>6=w2%7tmm(2&h$R{s1KJ9RA!iftiU@@UQ%e!n zAOhD|O#UdTObRl&JVZ3aX_}#0zil(z3uHFn8vNEK5Mumy+-6;p6x$v;Nk4{6B68MC zeO)RN;{>bq&~FOh|Hp{_rl>fUOUVL!$sD4?{0Lj!)PmAT{T4ecl_QC^UI^9%!X6=F zhLlad9diUTY3$$qLVj(g82YkPfk+iDaMV7e^?3))^oo}1uYbf1{?!F?A|fJygK7DU z;{LDCEA0}}x1drkUMz#+hD+?s`U9}8KffttcbSkl`sK?D_r|S&EXDF6`V4qRU_GJB z(>>RCld-C?U}E$}tVW903arm?7=siNpwNYWzd5jlaPkMx-=PUA%>4TG>kDQ99REI6 z|Fy=YDAt`5*05P)GeceMB8A$E2ja!XTK#4&?42zIYcw zXy4xdIoJcXr+N9iSpLo!u?99_x~bOx89%L{KvCjU?0ux;t-@_Q%e$iAY#0G&f~jH< z!w^mU71-pt&-~wU6K;TiR<&bDBERcD69Ci&o)4le0cM1zBFJ)ahB2_&YwC!B1Ie+b zsWDP8T?XNgXVy6OuK(trXu(}`+ zi81i~7ZePaW>#8b(in=)#fFF=M3K|L^P-!gnp*b1wo6<$i&>aM#YbL%( z+MM&PKp@2HD_Dga-nUntnM5%SJ!>tzLrxyT#z8YYeZFOl2Op>uEXL|$QEw4h+5{W@ zBBW9w!8#J*YGaA(__EYN8ak{8{t|U}<&hPiHaO-*iJFkw>)Z$?VLc?D-odvu><-U# zZ7Bo&(L+>h9HXUmAR~Sm1MfJW+R3LW3fMMMX(T$@H1X%E;Tc z+Cn~Sbuwg=6_qxM|H_9a*w7?3Y@GNRcoshcJSR#WYa4-u2xl|EjgWAK5BVpz+pDP# z(u9E#O{lcTd;`~wl_POI3=z=B4cO4btJt_he&oCIqT6*BFI>2A`SRGrM1$)zf%|y3 zEnn=QmLEBzl;OM4(VMNBaes~t+ms@4=i1#b4Dc^k^^0!F1{__6m3S---rt(iKZO?sHtE$}RSI6rPK5NNKQFyCi+w;)}%Mw;cr=4p_AP|KT`+-+? 
zGW>Q{DkoI<%&zI&U+CliEGZWOtu$!A8+8#DNTPBy;q#*|7QHV7TU)L9akT2yaG;cU z?$&#kwaLc%dh|PakHsglk=K!m;Z`i*pA!M{-(yQhYrM|G%);f7H_n+eEDaT>qc2`mU-RW@!NmLX7}=Tz_0BvY8EcEugn_}TQ4MtB3Yu!CR`h{ftlDJ ztSm{-i_`zEZrl3kl(my%OLgkx?pibb723?9bl1MHN)FclN=gGOt zh2cgINwv)R330)!x-fq}l~d%&JY?p#6=`kGAR2nPckw}_fO}In@xcr3J}japF*wOf z=}io-&Ed2n(-bHZJ!3;bMZLLjz4H(Ort!`Ir!}2R3{y$dnI8MI=L0A`MT|=spm>;P zx9Q+WgS7WAmc}gi#>x2Z@%oqriLbVpWea`_Zp?0rLS95%R#jF?7=EUU7HxkCIYUq3 zSrWgJ%O+=1C{0oGPziE4f2=Wiyvt3&K7N0mQ$!)V;qh8T$TEWhV2^u9?3YOB9xgC7 zB-$}{wxRi^E#A(@$H($&4+kSKVDa|=)At9@g}u5IklW`Bwpz>T*Q1>S!(bbYw;mY3 zDs6OXk8jV{jI9MEHq*Y8mfp>;!@;NwAq_hUYg!ajMIM)`nTx~YZDzkVJ~8xq5n#SW zS0E-Mqh^ndAFt=ju{HVLRO}ca2^ijn6kNWd%rY$r zt$a7ts7i&7wd%WsCJGHJy^=QE9Batyc$$2o%j1Kds?$s0nKQTJD}@jqIzV7NL~RwX+dLqJsF^XXJt~*;T7MoadzlA5 z%+S&Bv#tbxAroX@LaF*g-4@P6Th7PeW(m&{-n1bpK8#YAw7fevgajME7-zN(1a(4+ z5Zc@xTZaoL#%BpzdIa`cKW_Fc2-yE2ZH?9=N#83yha1d>@haYn*1qd=WAV3N2HSNn zrEJQA*&5TpPvV=i1EeJ6jlK;K2ZFtNd!@j=Aqm)tbIOrWalmm||2?nADe1R10v$U~ z^O^m{;tFhNE#OzB2@VQ;V=x)D?>7t<%6XvFM}LOfF8aaJYQ$24Z0q$_5nvr&R}2RY zJ^evT5DxahaMrzWEz6zpSV#Cv&Nj>UOFzQ`>QV5k zQZ}@s1`&(u90nZT1Yk0|@CW*mCOnD!PPkn<_YLzp{@`EtM7+#5wi+l_lhgmw}>ET+OnTEJ2Rg4riJ3mJwGLvTP(iwQ*e=ldXhIjVUx>!e#zlQ76*mS zAL$*jOx7u6{yDT)s0~~?@EXGvGcUiERu#!#^I`TzxN*stHTqvWXTQt*D zv`Hld3&h?=(XhU?^?rmvh&SEh7vC*EM%io_%wL#E5{3BEq%_7t6Z1ChuVUNNltWcC ztngUEmm*;-xy{C!TxP*%8Yw+FoBZ{SPc%|0M{e0mf_^-cC@XScdr$2y6ovCRGsAU% zPMm3%F~}mk7`2IGfA;xKxV)-iLIcc&^>6Ltq=BL!E+({G`hWqxVFOnP=aJZjhso(_ zAr;4gG$smtQPQ?}nv>Uj<&E`8HX09_D*Cqd58droi$bm%Q%o!ann&9;ffr!9s;iVDk zT*3PNnf!TvH(r=&wLVA>N)ww7Wjv>n`X94Z1bF2QZAfIy1SAR#>5+()`;kH7#@|js z-|IP%X9tqTFXE}_u~(Tb2tigF)!|)}s!Tfu0Fb)>7S|IM|E}HKO57k&V@}>{q^npN zQh}`#?U50rey@P>>(3-#v`j1@I!b5Qa+?o1^AV{iK~9Yt;C-0=6KJ2lnixnrOv=Sk zH1OvukF&kC|K>DFJ1cYo$D9f2{s%c~H_F9*TH=b*J9tTrjcjFA@DcM!qZ+FBnlwQP23Qv-xPOa(K1V^QnF@up&c zs-1`G!lm54+*jl}N*o8@kJ~u_CGN+RJ{!LKg?B!Pk}0mQ)MTnDlLpKj2pU67CZ5H8 zXXi=z%k~*U23cbYLIwuGfX;b#_s5V$+W5_;`PI<9zExi3<*DD6uB%N=3nNGGaKC#9 
z6vnF9Tr;?9CVgC|If8S0phg5vAP}@Wtid>t)vz)NH^*8E*uh&(iK_TQ5_wrUF3xT= z3{>X#K=}R3JBwwi^CTq4I}?`*MnAtN~=`w-xvST24TUkM%W^4$vEVjQK)1KrY z3W-}OJ@k6dPOD8rp$#OXB&B840OTnt%vmz^hIe_XWZaubrO;JfJE<|92YK0+GSJEE zi4D|>smgDQzK-=MX7U(I-pf+j@}m{Mr3?|_^7E%($*wTwMMO?+lWwxSNgl+N4QD1 z=mzmuHz#tM6dvU&)SK+NWzzW@j)a=hIhjO+eowH;P+QYMM`#bpUu<~RKD@KSpF=pL zuzAexbx&_qE7-UMXn^wc(}**~PWi^lva3Pdj4g~ZAOwX*cbkeuMMVuaI9*dV|IyZm zxKp$~i!lhb$Pqkpb6IlZYw=&7VWdQ#5rUW55)`hXUE`4OxE+H_h~t%c1QhH-qV8G; z3s)4Ob(>VF=kM_+U4z)F?fckrM?h1=F17*ndopELRW%bt6$oBxgdUBHwZ`f^%rav{ z)M!o{g9R~4eG^It06L87l?9sWMnuTH2)z2THR+(cgk$Ps_3L>_%&%!0lHHp%H(S$- zkDGkO1p7piyzkB?`I(d`EA@<=Y9`@BGIb|tL3|tnyV@8)VOMy;Mbw56dcFGO%_26r zLkrCuLslUL?P>>q91Z#ruhjU_n=FF!5nQW(@P1#zIacvz;`TNnB?#w{(Bz=#AnXE_ znFF$t-;hkfmQZSUzC7EH8H!_<=xh%ZiwtjMf?!pW3$cV%^AMV@nbH=gc2Q&K?thAj zXk)!{+aGOqRKouV}{UTW)2Cm;^U~sp&miS5D-U<+9l%(02x{LZ)$7tC##UcdG2U(P@);&;?pgz2;@vx1aG4%Qqd+|h7KE=F6+t% z0Q%v*!I3wmsXO=nwMuq6qxrzxZDaJ-I^z<>+^EJ<{eTzO?zX;VA|*}PE-sk zWzan{%qNnJPhv@59HDtDxAP)!a}CztoZlvJ*?cbB@a!s<;hz&xpIJpP;Osyvo4M@p znzctQs{tOeQ5sL~${7a>w@}5BslMME&)6U7OYQG5QQD*`zFIT|xGVdRkz^#2Lq)*y zd7HpyMvdcNxcz$-{aCjY*jVM`%Tw4Z)j+cRap{u)q>FlGL8Ro|M{raN!66%qsi@H> zv2ET~I~fJ`U7!2dDec@BBDKiqUAxG6{T|?LoudelyZW8IT+5}nMKvQcaB;r*+;lhh z$K)<2qzg-@j^t$MFqVJE=zGq0VjS?NI}JZ<7|8CeoO5`0g+E&L!~6s{B$C%T^#w7K zR0i| zj>9JzXuEg>f@xqN{>!1F+2jfWwbFLu|41|lJ7pKgTbZcYMOE@!X$&x0;3B+L0Sa>e_;zUkJ?%j32Z zLSGmZQ%o;$-SjRX?v8(}Fc8g*FmrT_vKE0cpT8Z&&IFBYen?MGLo=BuszW&IS=z@H zsZk_FPC-!`p%0ZBdq;I%3fRFt{;uch-lnZ^nOxpN**)HrPtWCyoi=1qLHf&&zFr4W z!M<3sT+qP_=uwP?YU7HSnywPT)w6$fh;|91xW#LhwU40{c(>Q&0KCCaAp&bo(9MN> zL2JZ#l=f6E5#0PPqg$MmEaz^0UtX5<8%n>N-Kx2KVI_X^795dlS<@7zzy=Y?WRQk> z&$@>Ka3*g}mxu!O+el6*5rCrX8i-#5C@D`5j}(BSShbxZf)?*tIj}f~^#KP@HMKQ^ zxuYXZ;w^zpxj1tcGG7<`9je!jd4JqiI1N(FU8n_EO}9A%KF712z@9Hj*()wC4u+J_ zz)DbAzwP6UB>fZj&LP>7+yXx$%XSxfzH`DU(^3`q+8WY*dPBhSPDjp^MwrEracOu_&GZ7(Us)Env z%zcjdP8SRy7Unl8Z&yaIdU6_6*}MN(E{c)O&17b0rEH7gtT|Y%`Sk44Joib@wmjLa z5y&yO0Cvb7;K<4_S%FA4Jo8n~k_uKbUDHW-G`BvC`K*+kW}iiJ(e+ykz;?>-f9GWc 
z+8If7w^#^wJn?9o;LNvX3*!*t%{&WT{VwwLv&1Q)EYKsB?(+HtHJ2XGMGLDWw}mjsU3&^IU(malD!!i=ymj-_rjA7$6iniKr5s{^V@-lJP`qM>S1Sq zD`wr9xv+IYD&_>g+@ogUNA^n+A&2t&hsz5KVgPz$GvP%%{SJUWv6{Z0Uu9`4$0VH^ z-t3>*4HT0w4mR+`U+}purs*vS#S~iS!bsfnJi7wS1jL!;T^uKMrN_Im7ytzVsi?~b z?&G4SmvDMJtxC)FN2+tqBXwKRu>$_f*G6S*p%ihz{yVCMqdJ*tfgF}s3dKT9x%YP?O3yoxq8# z>{-UGrWH^I;t`Z_^c(>8-RJzKI$lM!B%Uqr3GBWSND5%WlwMhKQg~M1 zmWex%TX#M^pCtu<11Sc@(l&2h}Fu@6o=a0=ky=Kbf|uu|#I$dak7uh`XQv4mv<=-6&fO;x^ zKxbxnP99MRGPoajl9UdxeW{Ouz(+!LuM?vO=XuD^&aOD56|;YE@Ug~56&c)&489=~ z0i^?mOz|OQM38D>3Ao7;9O9Jb|L~jluHx2yV^)A=KBZakUqh=zbIXIF2Qki8dnw#S z!7JyM-r)kJ!S}S)mO{mdUU+g5?#X>rX2OEdNhwY5D7|8U3X*LeO>zt$;j(6O;o@o{ z{|7+DLJC~epF#fxP3P1}q{hPZ`7Ssz`md5!zI`s*$Yc@L7R)>5`$Oy34(tS)CNRMH zvh`s9hDwt^y-taPI*uGKx*2Eo20B1|a8en1*SvlDa+HBJ?8)FVxugBc>;U~7V^c2= zk6T&23Hst9_qgcKZ8OaUk%oJYoVuJ>sIN_)lDe?j8`ZHa0QM^7P$t#HY22_V4%2)D zWJptiGS+)(S#NA}R6}GrerjhbL$rje_;u;_jscS+n`lHtzF~w!%Oa5=Kcy*(R7C$T z%8LJ8IsAWXJAi%fGNtI{RS9%DKBU@;5WI299_ct-GWE0Jxb7bpS>c{5U$&19ex$`X z7y}tPwL*Uq zw7I!iOa9lpg|a6nW5*b$dfK#>W%t{68gq$s(suLNpMF1TUTQvLKYjYrgRSoyiC0&v zojpY*qpgx~__36oz>l{y}7u<%B=$II~r{zl{#l;g_ zs3^&1KFV#;AN*1%_s8tTwGGevcNa1;9`V~RrGHS&^aB(yJT-Uag#B6HtkCc6M)C+YSD5&rhHQS%nv)XW*vDZ<8X*8eYC)HdcLp!@pYvrD~pG3aVEc^VWM_P fbZ9=~SYDWvUX}s;_$}~f2GA8{O>B{(S@8b>E=?ji literal 0 HcmV?d00001 diff --git a/doc/arch/cache/images/ats-cache-write-cursor.png b/doc/arch/cache/images/ats-cache-write-cursor.png new file mode 100644 index 0000000000000000000000000000000000000000..cb31d73a3c91784ce106c804d362dc9c05f4ba6d GIT binary patch literal 6593 zcmYLucOcbo^#9u?ghba?*NW^FSxMI($=>7Idt`MpvUQD1B4m{vKvn6kUVz2c zs~N+q4)3E+JaAoR16}347LC+~0_MgiHUiAFVxnG6k;O^WlnzQh?7hU1T)aHAS>i%3 zKM41++EGi#@beD0tL49@nIjg%dmT@bmHL~5)ERQ7OMOFs!R+9p@(S!JAp|L~wc{YAOPPZPhK3uy(v4K1@>WPIj#>BmId?%pLCJ_s zUOv9GKG(^2c+SLO5S*YxNKQ`6nKl9bLeO%4Zf@|qZ>%$tPCA%3>*VAlkPw=0$F&QM zj*nv}uc~-9Yo!f{Vb%QnBveS*T-d+&iR{HX!bTM-$!{ido5MZEZ!PDd@9l0`|uY zO9bdG%8H9&Y-}42`RXV%1r0%4J8tLG$?X0XqYm01)v)X(&uP{uldCQ^>lYo@hZDwj=(n~OgQ)K;2 
zS!pR^U-RC*j-PW)sy&2LFR54{A3Qk0<}=Ul6P4TFzuO*dQeiz!OiVmH1jUOLf`5Od zVUvc|@e#yVSXfw0>)q|nk6s7Y+C6G1YAJ4bSaavuwUU0j8iIRI)egf+sFD+a&AGX_ z4s-E@ScF8ZE|HTDWy|(o%3@1&DzSSo&4JYaWMUs9tr8Rz zEt^*S1vi3qVgxbk!H=nt&!2_pp?%e7aZTOt`x!AmFyB=*Xro_VPy5sG8zi?c_7w{!~gngP6Ob@fze} zJn9<}SFS7y>F&P z5*O6Xv>D**`<#jf+QWwvSYokQaBZVFAI0oip8VA5@@ZrAKx&myc}cSDihZQoeyz(y z-1%ppMzcgV`Slfo_GWgQw}** zjJ6-f$_z(FMpSz`+uI>FddQvxL0qk1x5-!8ECN9J6fF(xniKN3ktZqSbVph47Bp}f zO<`YDS65e2@$>Gj!^4$Cbu~4mFc}e%k0gW&%n&?$%ByMa=FOW6PyVpDh?pcjop@`? zEnpocI1uaQke<5;wd@Ld=I+b>XviAkyce+MZ=gaje_3m@W!^&*2_<_#O zaa8!B4R{FRU>-g`7Z;bzt72TH$xMy>hhH+1tj=`ZCAheam{do4dv)Y-gFvTK1oeUxhKu#Nk&ec18hjOT&MkKd*K?r{_irO5nQQFL90kQ>IVZ?VlL$v zM&z4jre`-(j441uPIq^A%i}*|`n|tjP*hYV>GqMqgFQP8b2m3hx*Cg}U5Zlk z@aoR&U%PR4FC33LtsTcsdJ!0-qn*5zBo0H_U7DGY4v?p&prWd@YzsyrS9?5vEq(}Z zZkF-tW}?GUvB|AQKM@Wg4)6( zN2aHbqvcP-${BA{Qc{MTuCPTWC@U)mDuZ+fV~9}l^E-HJT5nYU7skxo39OS&0{$D( zv4k5Ma#%UG?MYza=jYE?$o_b-hli$6>~gNnmJi7;xFe~j=72;dCB%k>;myOKT!7ZR z8}27!va)9Z769E|BV5orh(zmL5M;o_X$;06H9P%t?6c1cBroYyj;fADchb#--aZm6%u#X*Ne2;wCq;5>10 z{gy1IPXPq%Jiw>+Kl~nRYEp_{yHF5 zASF!1%*@Pu4;7wj3z4tS|73Z7Jn`h@z)Lymauhl%J-xwa;}hVQMESoBjg2oSA~Ruf zUUbsLvdjoKH~#w2;aqvXsgI=?kTBBZ&u3~Yx0TT84zz~N4a486g`|%(cl+B^)%K0k z4`st3nsSUGjtmPQRzK3WHI=O1oxw_`?5my)|7q1Z?^DP}8 zUKWOlXM#<+#=ga*h+IO^SD0mTp_w<^an4rA6=H;hsnseBp^cD>g$WB`r}AfvpQ(@_ z3<%&%l38YXYvQ5&0d^EblDi|<_QEkjiu1{rwl!J>b@nGUi%q? z@pz?#hJjb@c&z5gA4S+rU9#9+5*ddfBss zruN3kn!5A|i>iok4h#bMeA&x##+jzt<;O|>;_&p^Pz;9p;_YFuBhKTScl^7qcvtCh zsUx`VjPM4+N{n^==0)N|2ud;5NEibdC(!#!5@xM8&Y_Ptd$*>5%xSDG z|5RBX@w0U-4kjFU#2|z%xsr}CuEqL^i(_}?gU#T4roB%qbhstgFQLof@CDcV|0(+! 
zryG7AqAk3O4nzthalXOW@ULSwVzbdGd|?|aWOY{}1`c8sfl#DCyViA%UVy zng4AQArh>8k*iESG;l5tING(sYWzmja0({zI^sgU9Vd(qMxU<5+x}jtC?QPE6WzKMkh&GPA)p|>jv-l+lB0yH?$Kvtp1X%kodqLCDNmC9d5ZP z3ryIE$7p=4m{y3^Z&Ul#nQV8pgfIogJAq3!UE_?<)p^|Nk~>zhu5NQfZb2tW{Ou2C z)yR#zwU)m)9pC*?*{WDSLWJL>&BZ*Kr`cc2Pc&VSbeZ?Ro2e6za-0$jKC97_(j8QB z+YyS;Q*`F-lTxblghwAVOy8QtE!`oGLn5PDgl+I8MD8bJd5sHn@+}P$OX%Qy>R;;( zd5a4yog^wzPE5i^5B;AFG=GV&;S;tfR(kmA=!fW#!yCr{ImKn}y-y}Qf~`Yk^=f;= zxAz7!Hq3u%@l?~d;R4JhhDS_MJXk=ActRRoHHURA%e&EEML zt!_N?#}(&`Uxl{0Nf^O9830H+aK4NYMq>++N=)PEzCoj9q`3d;&>LE_4nQ-Xf|FU{(S8(HdvOy~ zaD+y?xX77F+>p~Cns5g=wHjv3%AF@%{`8MvwZprbSy2tw+a1-Xx!!j-YKNL%A`Tdt zUY!Py0>mk=_$ciwskxN6tcS9N{UzlFrN8~ zunrDqK0)=k;L%`+(cirms7d9=mEYx&=Z+vVe07k3E8qMuWI~w1-}ZN^KU!(V6q!M2 zyQ)Bo1KOf71e))96fp6#5decsmXR1ux`&J}|0Zql4%@u^u6?KilS=9te;NQwOfRh=Tne1KjGw+4a>%6k8P56~7`Y zL5=H;{N+fvc;JZIrn4Pqul}x@{)^!@2l-I5URU&&eZJinvD@n{HQ!W+h*Z}}(dBG# z%lAQ~!ZzN!R!ASaFmY^=`VD*I{)KeIxvg`oCg7J8%y)nSZVv*qYiWctvVezPd{-3j zc{kv7z=9WKe#JY!oW4t2?z;J-k;lRJ=8$z(hw06J5~q`G42H zVgXlAQ#T-84LJ~U!Gpu|2*IU4x~^K`!@u|7&L{RHJ7dBygD)l-8LH`lMb!ZLU9j74 zwUfE}9$4`|*{D7*s#H%M=4Yis+kLDZ0tNX*r10%ns~J94lIdQtYL zpyS0hH=pjT^nh0kkfSMXkgGP9m!t1Mp@HD6PA50F0=1W5)d*H%#c$uv)qAk{Ly!+B zGQIF4|l8vRmDnAq7TWxa!WY3xw*Nv zHuVk!5y2Qbqv{uePfm~afG`f|BGEASli##Xp$~Ed?;2pgd5wVZ5wis7crXRH__&8U`)`QgEMJ` zL_V{CCMRF*=2{3=_8OZD361}B8>F(o@ZnPEqBIN zRaNx}+XMw=T($ygXG*x`-&+nRVCMy%JGjl9@Z*OoZ(Q5qGWkLN<%Zz1AY)^O5-CMm zE@%K;rwWNeHDj^KrNcAz9@V6ex`F3Sjf{*;Oc-mAXsL(58TMIHK3wABwQt{UVSY-5 z{6(p$#m%{I+#_KZxxmJxYu)bd?(pyfzj>5cWfjk@`QZ3#$B}g=BLi-9kCw_^Hf)`p zzYT@__0iX-lgEQ@KxhgZ1vj@XUL!wdcB&o8?f z%9f2{Z1}c6WShrE9XNO*?d>gEQ4G4oO911;uWM?4gAT2{hW(d>{Rm@MJ2vD=e7WE1 zTNLqQzb;Cr60ty<@+7wT=0H(6oFHwCP z5Yh0J1odn(4Q9znI^K=fV}p5~1oEHt>x(@qh*VYeHl530Y^HR};HN$CyoaZSCv`s78#+Dly7-%CBWdy}8250Z-iIRps4G{=;#ubFj?7zGXC@8u8^| zil6_B>(?h{8n(78V8C}j47EKk8|)56Pyjy-ZU_Hq0@Fh5jGoFJ7=#1;&?5wE0VPe(tj7G8&t|sd?h3?uZx^+XeV0CntZU#l1NR2An~^V{%~7 zb&wWdI&jhTdo4uxF&xIHHHn^jyDDJwM#YHg@&s-_R7 
z-~Nf(SIsu32AKsA$TjJo+uJ$m>5D~#3b(=d!u7W2VtdnetGcf3*k*QKUf#EF-{Rsf z1y(4QUW^WodLe2q04hK z>e}m1i;L_mQ0#4PZeS7ZW%2gNszfDu+^$X!hHh2E#dnN(;^X&83cLkA+rE zYy$oQYHzU0wx_SZe=~0rsxWK($o0ZpMocWCdRE~o$WpDIO>=NK{QPu7zKcbAyuw_4 z&DqXwjXMV=A|%v)OhH?7Z`utor7U4=z{$aurluy~!w2_&csBy~E3?s|JRv1ez!?EsMU)!h7qM#`D0f+KJ-ii!z|Mrn)+1jS-rgn+rKVS#Y{Ho#nUtkzDZe zykkAdW#-QYoe}AR+3D#D<`nq=NWPS*1GcudtIAl8C0}Xn968NYS1`fo731c1C?PKO zQLW_#c3Ghl+1%U&HmbSqrgi6mi#6U>RNm zqvPh*7Ux*0SxZo=;Q+Clz%7mUZNTuWs~o&G)IQ+52$f>=tL*H;-IvU=JaCz^8-xA> zI(0DBzXg;f$K8a&pP9gs*Y&oNJ;}1liP4?f@@MjXu+_%rEow#JYZplMzLrvjqSe#? E0YC(8NdN!< literal 0 HcmV?d00001 diff --git a/doc/arch/cache/images/cache-directory-structure.png b/doc/arch/cache/images/cache-directory-structure.png new file mode 100644 index 0000000000000000000000000000000000000000..8719d1ea2d842753b6a2db5ae3c475e48dea4e73 GIT binary patch literal 28553 zcmYIw1ys~q_cjPfNtbkoqzV#}(jl#MNO#vrGa@Y@2#BO05=w)B3^CFr0#X9Q(9%N> z{T=ST@BecxmIKW1%sFS*v!A^;u{v5RLxgBKo5YKl*u zK`gdgNPM47hU|W+N0>LPyQE|t?PzY!v^IPg=X${A<}nep?iQGJ>@>&q{j-6a$PeO2 z%H1q8Mpszw%s=umUAsh+J8Q6-);rsjh2W!IvyK*hZP^itBk{lO(O&Lm`j1 z$I-+Gk^AQruY|yVuQ-|n|9izm3A@#b!wB}&siA`+Sd9O*7fC8F^{NOuU;9EH5s4G@ zY2J`#jjHmkw_$Q%gBohF`JZ>dBGRB9ruRQK-Zik?)`4RUi!-fEkBsOR>zSJJ^_+|> zoSb)k(bUv5H#gsR11rLyuc4OG#<2*ha>5P`cqRVJ)>d($u!WvD|M6g1nydZ_l6#OPvrdipPAW$GFl@o{l4ygs7jSXgfV9UuGi zhu5OKqC$|5kB^6^{n(M9uC5N*kesIq4peW%4OuuD$T(q1G$dks5@MxK=X+rZFOF9zpERcddxUZflE+jveGS}C~QV0kL7?>QzoKVrn*v85m zjt-mo^Jke59L6lh?&dJvvjk-e^@4%ps32-;p^L`WyHPZu^QNVQ3s~$wP@}u~HY18F zX(@hsyHNy8B4T2)mxp7J_(4f>9%J-fU5LG265$6;N6u%@o;}TZOvNh0y$Zx9W}Svi zL$_$g&O{_EbW9@2U^MLG>=45a2->_pSRBes>vXc#VawV!;DY)En@|xta?eKQ1#Xlg z5Ed|v3U6{Ae>}*}Y}5T}GrL=8KGI+(r+aKcO>w01F&b5>sIvL`p5^>we8S^MoXSqr z%eeWTcWh)%sX*JbY@;99Y3#rjb7sSuN|BczAr=FO>RDn?^gk5v_0MbouVyW*5QH zwS7(KC5o(c2Z6)#;ho*WN%W?_{x%zSzKlJLSP7v-_c?S8 zd>vB)vxNi}34T6Gw2R$_Nf@CXcA=qmMI;|Q3_DK3@AhiZu70-7iJ4c3^5E$sJeB^4 zJU=@NmdX2@sq#f(>hi~HLL*C4(`MJ{n#9D!jg5`6GG6`{Z(5+c8ymcQe5scAl<1z> 
zX`8_;`?jJx(%9hF9d@q-3JiI_P!G6`e+>?cW03H+?M|TmHeV{|DT-`-gR{9p9huC~ z0mZX8YBtgfS~aB6HmZ{A-nhjXWyl5@-!+t}7&N?;k%AfIJSVn-ZJhkl5hInlDn668 zg`itFma8J6vVn&@@9^QwlKxv)=eyCc>H{=haQx@bT-YZ*Yd^c%aBy%Oao?q<|KWqQ z&0jA>Gxt}%Blc`4bId_@^iSZ+)ae;#-zAuTE?=RYHeKuBG2hB4n>_1h&EcfJ{%PZnO+#W9=-Q;zsLW3#qX{IH4!yR8SHXlodQ_ z+f+n%y^7>q+L%U~w^{c{*L&;W>hhedIo{*r(I7jN1p8JAb#=Eg95~3r*B>o{RMVo! zC3s~C6wTwV+T#9@|G4(!OMC8&S=RJaPQexddnqj~?Y$fO5xLwBk7cKbR($tKQc`k7 zx%8t41Cphu_@fe#%EMqvTp_m3peYWBp9Pxqqu^O1^qK?l2$F-OGAynAq0z!kCFQK# z`GGVzDCivM+xdXyRZUY9Z5pGq_MV?kw0s5H$Q9pdniKL2UJ2WeJ9LQSSz$@8h-$+8 zAiIZC>0Xv!K}!E6vOz0nadDAdB{6??nTEv}ix!wVo3aou?~jqG-@k2&GKGbPqe(b9n{6zo02}<^SdU)ci@vCDB z0^?BJ6fV49BeRMeASyHZ1)JOnRjKo>=8oAaceNswPw2WN3&txw*~2FB-$j(0_s-VX zX=rK^-QxP~h!#05_f6`nud9?%ZLJ z4mNy;|M=1Bbbp0M@FHbnYsBJpC?Q)S~w02*hx`%9&q+fB>;UuB#9xHYQK7BU{GOPt8A-@^Jx7hj${O2 z$zHB&cRfPG;ZMK;^f_57PKTT)GNY zq>iU-OdH(ou=Cj5+}x@pf8u!=alPoEY?%<=JB#3?AaW-{GJGv7`!UVhhR@mw7Z-(U z>!KLrx%zurix5NLhPx}oYl?CO^xcK(;TM z3i+%uTri8LV)aBO^jc9%vM|BBFyT4h=S<8E$HPtej@qYYb=n(qws6AG^O^Y%fi5A2 zhLc~7Yd=XXKn~f)x#%?kKX|>_gS^oV0v9xlXzY#Gw6s z?{~L}Z`ai=2e9n53z-@iM2UWpl1qL248?kHv-~T}s(Sx>uW}2Qw2-WPh^{N|!JljW zTZs+~+A$puEK%Myw0_1pZntAboX5$wuvy^>yIt}e$y%W+CJ?1{KV`GKsFic9s|zVR zk+TuqZ3*{UHpmp?+dwurR;@>Qo|p$hg*oKYx0lwXD$^nS88^7fa;VfUI>EmhYv7~O z3K8K^#uGLR1qzMe4`*Z>@z(m8lUK}n+X*g;P2*Huh@w2iz0S>)|S4=ntr9_18ge zn%fnIT*>;t@oa-Zdf}?z|`MMXU^Y1S6X5Fm>O%pyzI}PKd{)MgTmJ#lyf{NY#miKDNO$ytY*$iO~qF8ecN|)rC>*ipLgtVQCA*F5PuS5SM3H7-m^L*(Ou>G6=L0w zB;DTFNSRM2UA)gvDMiUle2<%wCc2`B?^tKBpc~-XF7-Si=g*l^_3<($C>s+huA|%> z48D&D zQ}S9+c`K>8s0$q4D@CSyVs`zuB*w?SC11Zj4OIZ3H6KZZQhNBUIZCZz;u^Rmqh|kd z{)Yo<%O_V?z#q4T?IKzVI#KZhbDa3^!QpbHgO(RIsXXseQc?AJsN#9urG&{J`_sJl za*=LbO4XKWavI|@b|%JEE70x+-A7aAsPqJa%dx&5gFSXP_5ao{t+^Qgz|H2zXaK%bh}p=ilHu%|^20s-qGy+RdA& zGFttbXt6y4tJZ!fW&RE6qSPJIi|vN_D?Mje_PalKLm4~D?pnlRGN~(?$Gj$_`QRKt zB%B3vRQL-($(LPac>5y|iHZ@5U*riGI|5YHjb1Xo;Q2axxWTb;Dpl3lq^!ig`k^AOP>@gAX*<`v!kW3I4KEcy)IZn;D_^ zeh#B8480KTvpNWeUZ_Wss#qr=b$6FsX$&XKv9a&9g`eKZ5_OiAX=-SoxfObOG?^h} 
zX#xJ}=%9dx3Yvb~D$1}p126M~J-4PFIPJ&NbkS7-BXAWCy9%x&*=bx#tf_g3XF)iP zDMM}c-8a^7mdJK}w5YzmexGMByksHT*Vrys+3sE)ueZkX?@S!F&ylJb05sC8Jlxib zIe4Iq;-EsRrYe`?PF7k*N3kvu(g?~8GKU^Q`FR>#Dv0%DjR6!Z(Sn>-d zx)bntBsbleYV=wHhUjKD@SZf~0VS*mIG7fuXRReMAd{*?(-W44r^9#VnhncMaNr~) zB*M00Spuft2D2p&I!I(Gcj}A$G3=EFDOgjE|24t5}pih-@SzKrjt9)+0zwOigL{jW9L`q#op@r>BGT_$D8naFp@OKR!DP z+%mPXDV#Nwj|s|0x)rvy%_o>{)p9o-=!8lRY;rlDc~F#pXhi}Gs-(DpTM*;D-E zVvu^>z0=>q5!pyUfY7QiYqAE;eY%|mp-eh7oFzu`L|A_96)s7C57XC0z&317R_00g ztb*gu&dv^It>bw;qJ#~-2N0(FeoRBg&W?NJ&tFp3j*6pMqHd$ID~|jQ!?_`wH|Pwy(j~@ z#F$C!fDH^C5wt5E{ByA@p6n|P6_qrI(135Fo;^~-t0pHv+%xib35f%X(BW;4pvt~{ zd6l#_n5nO$gRv?5nXNMqFNR@eFjty>X2~4fYuVSYoofKQ-Z$dz?9f3G|Ilri5RM6u zaU^DAz?Hv<4ezZc`rxAf`RM|qPvkN&C9Kx-clptsMQLS_ATUGNIRf&LjL|;s!7qcC2p2Fxf|NXye*mus7*Qo#7FVviNM=?Oog?zwJ?~OJsxXr z6f{6YtFb=!39P4`q(5s7zYm88Qfzbn@26;JXgq(OHVAk`LY&=Ou5CB(%eYHVpY_;} zH!p*|^FRl+(~dCRr0on!kYmvS9Hek0X_7p=`F4=U&7L^XFb38p44hpqSJ*$FVf4+_ z*vF^#W<@-lU74-#|EvfSgdZfl;87v)D7ops$65WEGcz*6DlBO0>iU#1R@U?}@Qd8! zivm252J3;F{k345xQ4b_+VivCy`x@Q_5yb~SezHwxTB9^ghB-i=Iht5y$iQ^goHBk z?ka5Mm6VhuH-8zOnBZ!As)Eb>ltt?mG~mI+yvZ;N!Uj-*j|N<%0i2Nc5D*a13-t2? zsy`a-vj#L=-ISUV9W2vifHOpRdOHnppfc)7EjIg|{^ad{y#t;jgHG*Me|9yb&I|8n z6^L|pFH&R+bBgRqQNo(5uWqi~qM*9E`txUHd$y6;&zbb5>UKjpe9{Iw7-8{`o_EuUpA^?=}9O^XkwQ7%4vasLL5w0fR9Zm#4j&5EflMx@FBm3EQkj-z=Gn zDfq;6``e7WX}vQQHkJY*78NzMysXJ?oxOYA?a1oz3%nE@p0IbH{Bv%7R&k2o#3?|% zK38&y^^)zwAV7%fn8#5_;+IZ-GCbv75TF}L1jpcR9-1g9! 
zod5U4#Nyrj{a7ZjI12=AWUnXp?AOE8_ah@C03T(EdGrSphtm<&XAMGV81Cy*LRY)?*om3EmV4i*63fA7 zUcH14Khm*kdWI@+VT_;&H4c6~)%-=#&s7uKpj&NGnuz!lK$au6omNq=%D;c7b3qpd z(SwiUffRbaMpmaTo;qC;=@+=PeEUYdSsl?btO9P+YTh;q!pPu??jjpoVxe?XXNbf* zlQ2l?$wzKSK*b1(@tMxqnHXrabEv4OVu|%0-1=a{{F2URoMVreU+#phf?xr8cz!8E z#gBapGsrZ%lavPnT<@D7fGP6xTVL`XT3Lj9?fuR?I=HfX3Y-;i&x7LcQ@ORT_L#5t zzPZiV=H4Zsdx^K=;^HzeFhDk2Xr(^gO!M!)tCUgb7F4v(SnS?ty^Mc|*$!Jq56&aW z){G8jX!P9m>h9G})_iLkv;l+Rj^hu#_W9Cw^B)^H+>G9uK_ihJy|nh;ckf)(JuZ zkHE>vN%HpYXYIU1RTFA`<@}V<+$)jJLqp!<5V@&EE^!_+DYgW4b*c2* zL&9uNoW!e05B-agj7sZHn>vC;A0k}3~Q2zcJtO%a{ui=N0OFjdbtN^K;gU`5NPO4(45_m z4!b+Kqe=<;@??8*Y6?`k%)Ckwe#PbS|J?%i-Xg1C1pNT+xztK-P~p}}q;Cm#3}yXj zKX!fC$`{KwCZ(JN2>KX}Bgaw@B;kh%>qOE2sD&jw!hLb?a7=sn{eun(vDZbfZE!q- z3XnboA`@1DWMhVghGU1yPvA2O-=Sf9T{IHEG|hnIiQxm*-t?5g1&Z)BF2_=_M~`Ah z@?D&rAu1N>I;i7z!N5W}8|Lwd!A2qi8C|d(gI7)~2&}+)_OBfrW`YH=a6EuKxG8Y7 zv2${AfOB8;m-^|GD)^6#z$ZGqn|S)6$}_&EYHbhyG->yD+j5K|oI={h#s;WM%qrO! z?k&s=gpZB%|9|2W(r`9YtWx(lFwRRV4|IRbej28-amE)}MiiqxqyRmP|SQw!`J(qLOdDT@!j z4u|RkdTj8Ho$F!CkZYmqv#_f8T*V0W`m!c1ijs;^# zOjNZ0c@ysct|j+rAY5gdF&4^wkNnPUpiI2Hyxu;p4FCGQe57=y2)yUd&F`H?CMEz{ zjtaArarnBSv8%lcfrti7Le{yd$L4~%3jplUe9$U*p zkzu<7mWeI_Kv8U!ZEV=(PiHeR3;C^8WZSuXxn^8!<;VaSPo8)gR|QQ zG~dP1&F%G4%VQ+)$v0{&Zi$-eP$WT`@IvfdU#dCAJho#AykDwi8WS9^x&FfQZ)Ohn z)l!HCU2~x&Ik>gk&@l?X7;Z-Ge{Jt4P4NA@{QUf#!{(-@@$=>t&#QG%E?lo267dCG z3WtgxZ%O^Z{5;HbzR>hh_T{x6B8x`pY$QVsf+!`0~>nkfOy2Zt~|4GTvcLX>Tnn*e6(b-x4A2=ZYl%W6m z_~*qBHT7Q=D!ue)G(2xG>|~7LIWBeUt4|pVuXev#PHYK|Dr{#Iexo%+y_#(O_4R8} zQIGPYS~lsIjw0$U^Yfuf*8y#NS1Tn%haQu99hok=VLUI-#`_*@w=`_iQRgMJy>3I# zxhX7GzA&ujEcf69u(Cu=Y8CRYY5^!DK!qp4W5wE@5{#fT+bj(#UqrY1WN4VNGyxFbk*b+}hr33yolm#Pr zVNHAMlWp_@uxzD!{*0a%3MA6D#@LlKu)`3jU?6S1!gkELUrENsHd*lVwUzq3zGrpl z*D%?0`0$4Se7=FjV;;Ok`k!W5En(PjX%m6uOZv5nzR4JT0HQ$PjIkpDm0P!0j_V2w7|odR>})FfSd#1)=D@C8pFgLvFlq8GGAd>L-0(w#8lQ@&x^7?cac{QSx4cGr_@KC&;VzbP$Eh<>9xqQG76jX_5O(T# zJ`S?Kb7RB665^=%#Q%*Sq4Oapebx^#FZ6UEE-rkwcUbHl>Dw4N-jFg3V2op0IJ>)d zr-QJtpYs^S`eUUgg7M`kVsy 
zn)5Ft9b(h?%{Ht(y9KhcXE@l`Z%#^I)|OOM9CsUi16N%Yk053F&7BJ=C@H~<8?tY8 z;JmZ@xBcE5`EC__w~l$#gHSI1^UKgZ3UF&y8W+L)Ii(|Wa{{1}Yk4g~Ec5FzyqVi6 zsjvK!|FJ*KK=4yyxoy&m$n=k^Oi7l-RX8Fm()U~VZ%j1!SaGf?s#iRAX)mR!k*5!Y z5XWY~I>HR8&dfw+_VfVon;LBoawDx-T1zY!Lap(j8}B*i)3?mM<$(|>9suRd9SQY| z449=Spj#}V0L?SDh#U|BkHajsj>BvR{G3eO_nm5Br&H!0{3S&IS=O%8)iK`K0Gj^^CPC?Dric?c zrQnVWmNNrlVHvZmDwQ&L_7lF$1aPUr{LQ!B+xLQf5k|A2`qKZLFI{eCk*OquX|~Oe&-+9Tk9}gW2d*pY8Ie>Cv@!E7&7x`$n<@|X#AI}?Jt&> z&SuT*x~SOjrM9+9g32EHUf@5Cjg0{|9c%{71K8RYJl0TO|Dc2Uiqu)f>KiC5(ad)2 zuMD)E!g{)rErVHrw0 zGtBxR-|O<10!bf$FOGH9@iOjA?a335fC~4R#35|Dexb&kkGR*nhmdjK+%q zaSQtxvc|o{z<=G%O(X-y$_#q3{Ca7tL@Wj9MZnx9AU^&N66Hy+skEWH&of@Cv_xRP zf{%{sag&sj;ruUD8W&3^Y3n$ozC9TFn)yjfn4&h*ue3VoZ92zXmD<;P4sDr@>$N~n2URVDxTWY@D9Dewok^4e_kaCO>0md3b~PNC)2PXs2td)M zh!%d~PCPS_Zy+dQ!C#;N6A560vy&6Nnr)KnHP#%*EeE>fu4U0|HprvbzD(p&Un_Ao z3vft({La0YpJv#eyeQ9@2=zDCnE$ZfzI%1@gC_K#dR5pab!^qP<4SS+qpC)d5+2jr z@2giHFy_|5JrGA6*^u+4L6y$i;pSMuiZXnFfV&KB8x#l7$QiIAN41~cWn>h4Yp?dF z@sSmmSxLo)W6jxS*2PH4qwuEvCfC_*J-k0ui!IVgjqWQf2%cS^3tewa(-_vo%lJJO zL_ikXdl}t2h#V@Mw!?Ql+SvN3345(MEq>gl4BG%*5RYYJijn2Fl{4H)-uACBunK3g zKAuv_bAt}p*$M`(vRA^Rzi)PR1yNBn|9wk*J36@8jx@>f>UxIREr4GK<@5lexn7hM z^o21YF`UUt8as*9GRBhdBrAw5@I02u&@8qXDZmLhoG+WIMbVj1ZEOHQ4i{Q~&Y#!r zZcaMhBr{_x-^@Fz>IV~Wp`m0uOXl;C>-`Lbp8GAicb1dV%JVLbi~d00^*o=df>rEt z*$8CF0NU68EhiRuWAW>^#D*Q)c!iyyzbq9`(b(uv`GH?sm+E1yW9|l3Ol!D6Z^qG= zq4$nS0p!}R@6Gh=uuiyKW%9EFz>8kB6iueGYVk z{(U@KHY{|1xo>nE2M44V`>#L!x5j@kau@>-fg(Omj29!Fp8w{8*6=Wg?$3({XtpK` zvp`iCgoJ;-f;P~Joc#KG4#@3b8OseZE4_cW5sCZ<6Ap*dZX0fX*AL_o6Q%=kcUClb zpgAK?N+gcQvf}9RdZMHC18@!&TMuVv02P%NoZ;7-ZC9I&A=kpig_jp52g2GN$ zDnlPt0H=?>)4TdGcq5zn(t5pm1YbY_V4D>~?jG-j$1U@_9&T=Fw9!xCrs{@<8I2Ay z=5U|Q@LI)WpZvyWuxb)diLNloCe!bS|iVA%W-w{^a1F7)T@ac>9e_UPd9ZybuP1)z<3s z>!Uz27xYjUW&f|2b=BWG9T8e((h=Eh$=dEJAG@c;Zw-93lA?L>N8}r}2Ano~Jqw7k z;Tp$-23Jv^Q5}>$My-}ReL8KHxRfu&xlYwmT#w+OA?PxwukTB+$wvwleEs_M%NGrM z9>`L!AAED0iRrnFcXz<{_+ZwpAf~)z1nHrt6wa;)iy`)}hcf9`7~`MRHc+G=GgB%L#9_avK~Tc> 
zx5Q0)5#~4QHTNg;fv70xcv!Bp0Q&B`NbI0`gV=>~7$pk7ygz@QbxRBXFBN=kfYfqM z&-c>+zeP)}(K;$An_Bb`{}i&jtmr)U zcBLs$U|j1i$C|s#gH9e<(O{idA&{x7wmv-VdWba06p_`!ZLJu||Az0sXxY^ASQ}=6 z(iH;1?A6-&)t1oJ%tA-B9czj&1bFIEo5Uakx5@T5yYBSf=Vo;vO3LJs__Yp70}l3!(%{xfV@OBV&a`b`R0;+)e4a(JsFBvAj9rLw%uy&JM;59XQrDu6B^p^EY*}~_UV7}R1#BK z8RCz9&&}=`Zqak1bM`r@K=IlDwa^HIr0i2WS|nc!j4v*b7@A<*I!&R47SGl8;A{7D zTAR)v&G-{Fp9R9kc8ZFTnjC#kq5tp)4-EVjADW@+npwB#8YAS4?X#n8B=73kIy&~Q z9Z0O*XaYX~YyATQ6qlpm2~eOV6P6s_tCBn(kb{ryD0_t-#+xGQ-TO1|ybpTOInMs4 z;IVuWhp@i_JWFX`0{b4Mu-6S9B)X2H-Au0mQm>U1{U*0bPoH7GOfULGx{O??7|tc^ zp%<_iDD=d36dSxR@yMsYXaHF+)Y^90zrbo}@kYDBU5ttVLD{`s^xqrF!RdzbU-YE8 zg=i%uQudh>brDM-TKrL zl~2t^sibhft6{63gYtL=h^=&VbfLhZAuF$scLB5uXgg_{KW`Bl+p)i2c2xK7tT;|Q_pjHHoY)I`HMx|aXKJF2N~rdtCl~K+xN^ac zvD{(mYq|v2LJmdD>lFU6fumvl?Y2UtunMn%>a~>Xu{AfW%kgM|f z^Gq)+3X`Z=^wVHn-M8Dq1G=d0WLt~O#{aEe5g(f)(Y4*k!94hzjt63+dIDGZZ-pCO zfinKg@!+DK+^%ZJPZP!QzyN6`pu(M`DWBZ9gqv-QWHi8nrv3E0BtJSAAcWE*US=jQsu)?zS=-DxMs+bD z)CL2sf+F~8lyBa=Ns`)+`QIZ#49qz}?I8!9#ShC$O99!br>hIt5-#C;9e|SyUz%3Z zR^0$g3%1$JazAV$3N=d$)iJ9q?g56Mhk_rd^q=JcOVrz_AKOAalbmxKz!#A-hm%KS+Av% zQd~JW{oOih`$G6M3j0M?rm(|~&S`mKj%2{jpL&-D0^$kjpyS}LHK31L``I5qq(Wrd zN_00?P{GcKIFJ!2EkAALen5ND4kQr4%y%lJr5cAc`axT6fYGNa@}9~SWipzlZF`DOsrDg_Cvd#bw>*(k(th8XHrKM$LWTdBWzF}H7 zCp}7LL1iVLbkXNd=}uNwRu5IMXt^+uX~J0-e&4>4_n(jN!SbcZ>ftG$OfQmS;s6DB zkAoT9M(n2h_{dI16Vwv&k+FbO)aq7BMXvX4aS4DQED2X2>Gg9Bxd5LDe%A@qjC*Nx zZ)=q9$E+t08qGl?b&NUxxa+0eqWnL~kpP7TJly$!tz8I`3DDth&mRp<()NZsRL=Mg zya6~CiZw?H7S2h~z?q+$GdD5e6;59wfKQ(9TW8iFxZuT-Ha;ImllLJR@(=7;&ErKR7$o7jKtdL%YoK5iK; zUF>y5zwR!!tqv*#6{S*M$ch+<@DxI8?U!4GW&D1(Eohly;8i9qEA3grgwK{69$3&Ek!N|)zi%*95V zm$@XT9|`d|HBQ{`+V5FT$eGT#3H-Tt0pDh1_Bbm$3cO1g2;p~=75Zz~dium)ub89 zsB{7*O@hc>;1$?h@W66q&%ad3qWw7tfV2{BMW-6#kh~4s&e3@3VG+sIb~2K7-QeeV zd_~63&L1V&B?=@o+>3uJLe9;te9$Onh7C=8-5DEyY|Mw|UrUSC_`cFDc%~>`Zos+I z)d9uzAxV%!={k95N#NpSA@ANZ21TB&#ba3|I9-2Y>|8s7IsBcJZ;`xLc z$sYk<-w8=(Rkrx3nOSE5oD{cqF268WvU}6z#hNT2 
z&b;pZpO3C#8Tey!e!LC(0WyWxyrMs?pA28@)VdJ(o5iY!?ZI_^)AbO$PI~P{#XqJ` zu^CA9hV==`I9^;WbIZP^rBH2{1#B1_=m`S#Olvj{LQio!{UD@Hw*RhldE4AZa$Uja zGth$zvR!7em6n&fp}5ftnJC7); zVimdKL#$TWHj=!^^YbSqEaOK2Mg}NW_9Z-TT<4NBz5V?G{{IDo)<8 z1NyyFpO*?v%Y7Kot}qJ=CV2=!E*{{df#v&nu4jMis}9wYXz(40Cq!9s2_m%N6moEi z!uzTy2ga0HK6Gk_SX;AOFV=9Y3K)GF3VO&VSg~2aY+h^;>42?`4HjwlxCuvz&$DIF z=GLVZ71Z=EY)tKYwdB)#Qb;e_S8Bn=n+#Amk`=B#9Si~M)`(uGSWmE}?E^SHs6kRH zUoLWB0dS>b#VU9YxCAioM53#!%LQ&&`a(|TxZ`8r<3CM6WAzC9I0U^y$isKsxn74SCvB&zSutCzL6MTv zb@UIzp$!=z9|a1uhbRgl+AJiSG=DWb*R%YupZHxS?rr;k%m;LQbdbfcgAi5^DwU~g zS(%^}UR{I>b>5$IpP4~R8Sg4VVbmpSqDd9S9qsXB$2~JoMmS;AZbJvPXGPQUa9 z1$9)%b$IytKFf77h@gA1(w`QyTE1RajNF!=VkCos3P!xcYX_Q;V9??r2hUPxAq9r_ zcXP8N)}98~MK)-LOcyX+mdT7_g_2l2UP@l3hR5fgnpH_wLB7%>Y-yMY5CS?Vt_X_q zva*ep3`=V64=u>Uu$`tgiTY zNH<^%D3B#vgs=&c{UTUq6JA~VhS%C5`?j`^j1eTf`Kq#DLdu0qBqo+Yz&5Y3&|rAR zqwVsyzV=7?WJzb9X~Iy0>A|b`V#XA@99}daBLSk65`FtL&d4maSsN9@EdESjBp*rS z;OhDn5YWX;)V@5g6a#u!svoR)rKgZ*zv0|`XZNVeP>$NP|EUENqQDJbow`qRJc*!9 zJ6IJW9IT4g2Xl_-y`{0wnjPz}C@gm>yg-gD4B3tcYy?^J+Oq~269Wh6WO!&JLrinA z@tW|Y1Uk>RmeqH%WTv8k*oKnaeg&FwrpA{+heEY9G2y#fGY#K&X=yh_0zI~c zNfnFd6c+d%3(BX1;3wWl7*sIWj*_0?(5Ss3bF3JyqY6c z1A_>?Lt&PIu9VKvNFPzp$=?0D8Z^L@1qnd8JNsCmvJez<9B(3S?=wfW7!owl))8(H|TT_5rd3A`yi3{iFc7%%W}nu;tZ$hWM^850?5) z+JJMaWcGNjPv7wH$@pe>k)9|#-KdzP!3N#j1yU%?; zK0cmn4j_bs(VfM2G?ZYFwj`gJ$^~@5cG{QLVWG$oY{Kgl{n0nhPi>7i^79S;&Vy44^SN>yK~Jn4wm14)2sVIq#0r!K0XQ;&l;O7Fa@ENC8`M{>xHV) zvZ4_Ysl;mZmcnu-e7jlW09+l&bb3Kw_i&D6v@|J?Oz?@%(j*QL>27&H221Z7_*oF!xA^w$c^rttK<$cPi=xBEY z;QfG7rk{5Dn!zwDZI(!J2OBJttVmw^9k1-iXxTlW^UFoWPS zv{S;oESQMPOR%uNyd9pK%VawEd#woQ}$w?f+-kda+Z3ZJ1W%Pq^OUXVmZ9)avH&4oRp zt4rM1GH7iypn(X!*>q|abQ>_h1xEmsfJ`JB%8iYLefj$JVlC`y4K!KD>W^CAOpg@2 ztqwl*wzB>;ImxZ%kM%?o8i!Ng1G*$vm?!G71kD;*uIe0d!*AwN-&QbUY|aaB=j9*N zZ(5X|+C3tFYI)VGph_YouJP>_0YMeN!PVi~aNN)f>k(qdndWkC$d8hDnaD045hM1| z1pj+zPzm1r-otVQNPE&!n=C-bAjAXS1Pm7u-g(WgY5z0|nnZ%59fMIk6|~zTX48Eq zn+z0Z77y#fLH;+GC(FzVt|f%yBJPj9r>D+NI(~v_P>er@oH++C56e4}(1Wab*x{&9 
zvlfGtSIt4~j)H2j7PCyqag6F(U}Eg=?*}bzGxJZ%OfUIZ(Z#tHl>K>L*S;fo?HbbX zCF9RTmE~F*H_lxSR#9sHNj6a1OM)8d*UJQrUVk}nCTb+XAbk^Va!HA@%}A$!K^JRg zML`0Af5j{8dg&}qdKyJUkBYu&sRk3fg!Nd@9+eO+6K5)<*GC5ug?7J2?)ik6I|L=N z<}`WMoo|u7jog2m7&h^8tMkdHrH}sYePhw}L!I<*f1U&^6xOIY;WPcY`Xi~H7yRsO zbK4N2IcM~O8m(mil&cgehA5il_L)?+TbI;keCNWRkHqgI^E^H}JOuM%A$=*FpieAx zF_s$y&@@I#|CljRdeG*+Cn3~wI0|NG1JAb`WDf>~!5mPUwQRr+825CvK-y-UThZOd z3!O+HB=vD#H`}eQrFrMOdNHyi9IbS=^{iOc5%)#NZ_`nAdb+WAB|5OIB4LZ2_}T12Fch?L;Ewh2 z`Q`mXQ;!y$=L%DcW{CrRsod2=pEa|pE-y|G!2AN7`SPy{IvX@S0~hw(#GGgv^LL`Y z#g$uXp9MrXuwAUn{r|!7{ zFaoHI0Fb=E@RV3f&^5O9&TfsX*^U?3Ml?eTooK5(Tr&067p`|cLz@0B9r`M3nr3xL zAYdwkI@f}5BSW#QGv9-2(4G6JZfj8{#mntuu{{!C5Uq0|KBKGGG zg6FMEFDKU}LIZdygClR)gVVG;N5DuZQ5<|00T*MWcK>c2=g1*eqlDR1yLL}CLgyhFqg2e}N*+s^{zo!@ zr3>Oi=NDu3C-hN~?@4|>DIqMTa^}Q?ob1f)2MZHrug)}h^xzwTZo;;B1Dv0MO=9+7 z>LKVcW6FdZ=;0>#x}f%5$9cke861Om1+ zC*NQC*M$!lga_?e~MA;V{AN)M6&!8lALOTEOvIC z+i~GL{LGG6($(m_6vcam9oEho3)(1?<4b9>M92wctbI*_qRWbq_z!YGHy8Mu9bk|c zFE%Q|-|ocN0`cy4)978g&wpjf!c>=i!yddv$zGs3;^GK?V|eer35Rk|=$M<&ffj3F zdvZ$@s!6IvgxoCAfkv7Yfe~s_ZH=Q<;dm_YLELLZCi6jhREktli(vb_uCP(*rSzq@ z2q85;1k4WTVD$aCb*ikY1|fisU3DU z2F65AacZK#c99tH;wZoy)L4*$PCC*h(99~HGZK~fxg$?Vy9JgKUc-igbAEZcknkO& z%FalS=`9(3YS_DzsXJn3= zzP`SnUp7n_yKS31s-rNsJ(LRs$IpmzNVc7>jJjdC%A>a|C#Dop$vR{wS&~Q$UrQ<#DU3ZsgzO~QvXqKUlqGAEElWf)A`HrsZG>jBKG#s+ z-~Ty|-*G%|pBMj^Wj>#|@9VzrYrD_$oGPv&{_ypqw_5x5y_8-wIy82<-#++m?Thhe zc^N(shBAOh$;i4JiA3JLaqYO4%+p6uKpzhlZbsN+cey5}EM}4PBYLH%!NvpoO9Ugn z@?c`iY9a$hvNTbbFmFUBFMI(o5_u!D}ech7@v$H*~$KEAj5&ijRA+GGloTc3UW^ohPQaCN?^ zDMm=`#t6d|5e31G4A##6{>bAvph%mhZ0nZ%vF6R)E%}_VUv1-e$8o}xbBPS3$?V&M ztn<&9nTeG$0kiveXG{QgPSq^VAaCtc@T-fCX|pxHEdyRd$MdJxuX3Yvdo2zH_T`H2 zTuo{f%Ml|whVn6R$n&enIY<-PVs}l&1+2G?m`eppbS+zKEREim zI2@I(cBt#ssohnj2e_)RN~s~nr~~ui+x3mC36%8O*Iv5B_7`DJ8GKC9{45x{@uPi| z3TcgI@HF6H+w35qnT_OmxPpd8Rg{lQ~b$|85H@5#pT-11>U)GmrVqs|!N_#V0@|hRc^h%!^!p0D|(D>nK-tvW^DL+byajcQRRGbXg z=pzy!DW;x8I>8zFqMoJ@LnAU$h5~KV!b=x^^}C9>*%mwM15}AT1sO)6&Cz0t!ayH= 
zpiRh<9Tsc3Yc+w8Q@I>7UEzi;yKm%GdU>+yy8lq;K(A#k{2{c$^UCD;joN~KYD>ys zJFIk;c)Ij*XPk>o`gm#e+B1_%y=11a2Kjn*6qa^ajNG!X^_N})9f^+zM=m1F(;n=; z`J?+8u$aF>hY1*QUrSt)O_ZMtFnrKynB9B6M=;+44^>OxgEME+#0NYqng_g2@0@Ic zo#pXkKy>X>x6shd4zoKoxqPb1A=!q09NufBAH4ze`U|ypvT`z3sY|(T^Q8O0$-%+WO1sYb@bGx7sv+BuSo>yv?lbw=0^{uzW^my`szUOpXek|* z^{wwbv)XcJjGdTXbyn8N$}pqbYJEUofBsgbgcZWU`LJ9z1{np90;b)(xLS8_t-{ZA zl`crLuPBkN4izl9dk9rA_~IO}!_A6Ds%X5t%G8WaQ*37W%UBd&Tuu8oE=; zqTx0|@X62`Vdtfcj+Q(|qwII@G*V=FHv4!PvENA3b?3u*N`V9_r(-T$kVZFBP33I` zm!dF#<4LLjzRCVJsr4d{3DmE0Dx zSqeu9qxitwcIHE2&P5M5U zJxeG8jh(!w8NY;My5$#PLcYP+Q8~dZ^2F{QNhV-6HkSqcc+#F7sd9V3E6^>ZY+!#F zj>sU?sogPY&n$8gT9i4G{zX#Nr3dUHNhe=Y z(dm+op;{U+$MKuw5Ux+akENHSy)(Zlbq#jRMm^$bg+%uf(>~ zZ|G6;XY}l-L{YCXqNT;;7{pFJqf3UZifL39l7d)ua>{uCGK|?Q0$PqL-W}>=N%mJ(x z*;sAu82SaMZs71wp91So28BX_y3Dv(c~cAE_pb^Mfk23&4kAFum|U*e6-qppCr!<- zb6C#sjrOvBc0Xwk_q(fvIumL#+{tB2`%H7LGhnpmf3F3s z9=6Kla)pFihM|%_zKgrcl56ziWgM}`#V**a$>-PXgwIA)^neh*qJOrR^{2=;PEHbK za1nX39uaPLo<}9o)Z83WpvTwk*&=ym+FQ-O84;Q!U$~HOys)*jyvw-iZutd@8}^su zKiNaL#^YeM+aKPj*?6GYwcHrkN|I3KnyxNb>XqDc%kv2K@vF5(-49Gg{EC^5(R+S% zxxD>uqIu|L=+}1$JBp;#?la_Oy;I~HgaCWzN1h15hfSE1zh9$8q8o$>LLPcnLzc-Y zBt}(draM(mXgUb^(EcxU z)+1IzYDac|yjy3mYI9Y1*$p9b&i*c`V@cqK-j(-8WE@f`L6c#qXhAExd9>k4AOzPDFSeql_L7p=HbMHh z=wp)4TEcpdWoQYASxp3cLeu`jB|sBH;r6l^mU|yVaFuEjmjiS`Dd{t+)3Mzz=glJ- zIY-dc_Exfw-K{K7yR%e5Coh%&{xo4~Ib>I*B_$`xumaR4fDzJVeqpTl3;Ob!9Qy40 zY>P1DM-JoRy;Jj5YBvf^B=duS+7Lk(UFYsuL`~!1d z^i&dm@k*ae#1HhoS#s;1q0pXXm=6t^yD<~3MUbqs!NssfizMz~ur)IamLqg%P4UDt zI4}%-nuLNZSQW=q^r_9!Z;#;Tb$J;%&?o{beM)m;L_|b64gHufOt(2};NuoqU%0Rj z!9czOdBE3)vDWXn2!Vq)KZ;x)2sNjC5y##Z#;8?XYfK&n+VRSa>(FwU8ddqp)5W~N z<-DQgp0b>(!u>4hJ(yj7)^@iN_f2y&voxrF$9#%SXe}rvTsgOTumxkN zhq}MY{M%Hj1m+-?$3QbclWUZd)&`tb?B`fzEyNAP4(9dSn(@^*Cexb@7d`i6;EWSd z)HaA7xXgX~??tfTbQL2_Q!e?Bu(R!4on&R=kKgXeif);JZ_1+)Q_we2)b4N>e~|if zGAcb~&A{k-2%Ji#nu6%j6wH4Q%Rgrezgxn2)^bTsHEh6 z`l8%JrTc%s{ey7flHwQX8wh08@U7BJw12h*?e9Hk0={OUhfz7{j_QJI`jJ}mjd(G@ 
zcLICJi6q&G58WO{T1&If?A5(r(ge*z3MFuZqTnO&F5Qc2H+!}V4%#=bBM+bYP9B|j zTxLV|@cK6J=HMp(3+93Kg?Ydn4EMM8)je%#LZz96+thl}>m=_@NU)n4+qS6^SRw{9*5Dpxt1OF2N+zvO|M@Q6wBav-j~n( zH{Tu2gGIefLph0={Hc`_7Q4oqrRI~FUKWhDn*$e&cwlDRYJOmj_JMHHDL=2Z*|~LT zgBV)Sobz-OcZ=|k-wnx~jyYOqIM(2hykm^OH)-Zl+hB`|@7g2|g05%7=ws!%yO_)8 z4a7m3M<$CI*OfyW)g<_G^9zD4#YO*Ega1r;!=Po@_n3#A6GEA%a5FfIx$NY0Fyh?_ zfu}YUu*Ae)CRjA%wCL6#GL>NnqK`<2!V@eW2~`(fnDpv2!* zG7E2rBH}q>N@|FTvq$Hnqe<6J{~?FbK71y`Y}z;UkPBzdL7^Nsqy4*Aj#UZ&+OcAZ zqH5{$_!ZEQw2mfZ@qG#nHT#;2e;s-LQ7+&)||O4Bihin%y)1G*@X2=GLF?;on! zg5JUd^=|M$-T!#thD=@XR&>^jZyMQH?bxuN9o?w<9p&{Pn>*sWlDHI zTP33KE?ZW(Do#5jgOL# z;Fp=?Uz?u`e=HZ1=0>YB<%|iEMo7Ogu!uu_30kuMbAr>%KWD!?AUO)^p){E+*Zf#F z4-Ub7_7-_8DixAD+l8NFr-L}71r>PqA2ExUE3Fbgrj;(orCNS7sg&#Y?v>3mt&5qxJ(geNwXfzuvhzg zKrF)t?xiTn?}U;pdpg?jimpHK!tx`Mu6{*es>JUXp}P+~t)#E<(qagF{c6f9VBE{( z-sU})#u+;2H0LM19e;M8^Evy`=+jaP{@Ev5X7wavtnbRD#KE=m2MTk=gi|+1iP$VL z3-i3}JV0ABSDQ2Ko&UZ_uJN(iv>ADK?1&*5vH`BCshOIYnKc3{Z;s3{zpg(0{kELa72FzJ2e~0jMlWmTmn*fZk42(I3gG?CaBWe4?uA02Rvxp6Tx zE+={q8v{r-zF{ifuUxYantO?o!7h`4$2v15#xd$uwr^5^Zh^&fY z@RiY3-J`}=m3>MS0B9xd{zy;=fIb8_rS76 z&s|khRdQ#)+~1$cQW@F3d_C{Yr-p+I@9sE$l?$eTM~H9dUr zcVNs=y0Vs()%cpYdc_$DzTtW-8yWvVNB&79n*VE=^aHM+J_7=t?M!%J+;VBWGaQx`H-HKmY@hWWIC(ed;g@9t2r&mt|{|uu+6X`#TY?Cngw7^<=&%Xu-leA zH?Y1I@Q{Jq8$_E^hu^p3R1>f2a(BGFGjH|FmFB{ZA|aFxZD>ftU4B50J$?EET(S?_ zylHW?K8$r+>x*jt^~nV@xY}63$r<2&u*K1^2uGZbcG{fI0}<-%>@1+6y1KfGi;F=q z^BRM-?8@V3H)fCNQpGh6vZ3w}#G{v!P+D`hw=GgtNA`;1{CNJLu$Sreo03h3agle-$&=uZx84pNci|b$S z9`%Sw!fKN5+}W6dgpzZ8f^*|8aBK`c(mN6vipE`+Qlf9@$c+YL>F{&uOI0xoku7z9kBUvbadvrY@4#OJ}!d zI-BD8uTOZx!6+2@(fiOD(x}L(p7iJ#xm(W8&Tz@lF?P^jmI3f5-p$N_mEwCl~lR@iI0Qiy(_5b@=e-(6qV}8qKth89t}Cp%K^%AB0_? 
z0N^S7WXv{tTU;Wh!@WxpZk53y7wKg-39qV37dv{8QkA~U+rNJUB4StpT&AjTsbvgz zqcJDGZWgs#ZhIJrqHJtjpe`_+;_&m^+1X{}ab0Vl%N?s1K0Ua&gLc~~yAvifxxg1v z+oXN}syMaS=q9k=uY$&aj0~@4d-(XW-KE;07+Zb}{A>TR4sB#_O$cHSiTnkJHQU^E z6nGf$E9Cd!w(O-RnQKlHf7yO~Vtd@X#2SS;{J2neLS;8l)ii$iRur zC9oc6RvU)?XZxUXpn3WIw--Bwq;IyIf@nAvA5F3VA)Ug*np5`a6)$yJRQG-!!>O_^ zL=xXSEimAP8w52Ds&YSD)B_oww-%+X^C+%fBQ`~*aYDVls=d!0@wMqWiE?hN^|-fpjnDqb zBWC9@td)V;)f22CAcfCat1at$mSM<5)V8%XwH&*#-oTS*m?BziwpW-6n%~GbRR^ZB zel7hh34hB*ppO*b#7ByZhY(9d#>oTo%1Z#0T3X8bHb2k+)SMu`eS|4Sp`ZS_B+Y$N zI+?smy5_TKym|Dt#7idq3@@k_*JEy?&yaV@q#UFM+WhYFe@w|*1Z+}et~B=Wx#WV3 z0!L`Y=10~MR2DH+R_w&#o0n{O2~ytX(;`~kh1uoRnEC1-o>^W)R>MoTWrl_d4?Vq6 zsne7lerDz$!ihxCdF9*y4e@NzTL{d&%5>=C@W_26--fB@eUj1`741vwV;|Vfezwt8 zqwFp0e8pq2*>HAjc{Q6ynogd)8wCO$O!m2?I=YGqj8H*j5`+Xw;}nKH? z3eg{1?wwWTm_a&TNzan;P5QDn*@tlIRnxAj=N=_dHA?(#`AB^DMD_+YYi|HAF^oZI z&pWYX+uz|_t5*BtiCnT(WFJRFCJHBGy4}-!3Iy_PUAc6KSs^o#YUYft=Xk4iyu2u& zKOMQ2;5~ZVK0iMnjBWDXGCW;(Uet1Ka*}}Onet$#ncF8J{l_TRL)(TV_l6o%oNJQI zv!^zTML5o;?+soDU4zCLc_f||`BLkWcHdU_ZWadyAIGM-MX)H94+sbRS?xo@j`rR(jg!F}$95Zjz0lX{(zkNMm=)JzWsM&i7Lcd!o1Hs4E~&Ys>H zz|Dm8=Sy5a-T##v@Q|&__PjA{bE_v`Du``3>>unK2{knX@=n>Rnn*NPMCXEJqi5$- zAwlY54EO5#R958dw$4gV?SPiV1LqLya2Jbs_Wk+2kKjn9q)xfKf1?F5bM@N@qmMiv zjQ-;_omfQ6cps7UXX7(S3E3^GXi-dz_GcfyVC<^>i*SPGKFUob4z*e1NQw)n0!0A$@dvZ`A#D>_L_D&aJjI?C%7(udl zb8I6w2ve~JF03RP2rhkS+-6Ht*gyLD3-p7`@RoCqgJ&S@_H}e1~}a?2E9S7~&s8(86V1!uCk8 zd-J=f>1}dc?z<5jo&_m zO;5L^*`Du~7bgyTSA^;c z`o{b^`6k)EKGj$=__OgB;TMIPS5#z=ymtGY>RR&#e#Kz4DMoXcN{Q1I4Ep`DW>aTx z2P>CK{YKl-jz{E0&GvtIO*o#hDLQi_Rp$9!Ge9Kdu|;(_r{l*_YfmB~leVIXUe>1;9j>J>I3CfRpw9&uce-0t<= zl&anG^2C*TX@oY}q?#}7+`a~*JwGHe`pt72dCaV=e13EvNW606#(b7$n4Q;M{*CMX zr8A(87rMGVXq$$g!r=JANitxc72UrfN}Iw)l_9~Zn?e~(moAU?Zeui7f%cPo@@ob> z`vqQ1&x~oxiiBTj%snKP3||rPxRz!myz+flHlEP>>{~tGqQOMr#oYe=a#wii zzf79!NU?owTyCSyf)OAZQEg-^iwo9eE+Uk}yRr80`vgV%P@i$Q?f-Zo1&Sfvf)@M{ zq3P&r^|*=bCV6|*`K#gg!`5qG1HOhV;4Sw|xqa4`(U>_xCny{y3mb z9${JioPVUUOIN01V&}UlrHP>3)%2GzT4WaXqMcPr1p=bfkCuz@G%G=~pNUgY>ly9W 
zKR|Q+PHhqQ`O`;%EDUHX*1qG|Lka}Q-~X%@TltPmb@GqcMR%-Eme(V8txVEhmF-?d zJ>zEM={s?Fwhr06P*@!?u@)`&tm{4lx$cPJCD|%Ds~78gRk)IeM{R>UG{}HG+jru+ z)-zGZTF|Toopbu(@S^d0)I8h+0OjBEBZJV&gEU~K)jL2d&r9EetZxE)E#;Jvk($_& zU@+1bLGy_^vm%M>-U1pkdqr`?*x}9p)QK9*p(W6;FR3&Iu6N(56p+)^oqxAXePT&J zRkzqH3%qn7kMhvY$QUzboMOFGrMKF*?AM{RjKab~K=@u$QDzyW!J}vnvmSv7+j0iE zpFOtnI}ym5B2+_Q7?3#Sc8SrxP{~il?uG(pu~8EXyi7N?o(bLj$D|iQILZI`yjbwl zi3it#oYYk32}byM4|NTq@@QXEdfcoFo7J^$q{vbi1YBJoC=h>*&@H6jJj6;evJe*C%uF=*Z?h2zvFt8bYV z@@Yt!^&abr`on5dvG3zgt0xYaUjNDH{O_QkgZ$*7sV4P-3;B6J#O9TX34!YndjG!_?x|K6P_s1ph zK1D5n%Rgi&|aGQ}g)G9|f>_h;Y950MHn4`fDU>XMn0;4ofx$ok{vW1#-zTz$q7uoL;)y6_Cu`P_C8SVDN{@BK%wTAekR>HNj4^hF@*rW1eG6k` zC;KvE9k~sSWqdb!-sPO{`_B2k_kHJ_Idji--*avA|Nno>6@Jr1kAsb$jgF3v!$4oh zoQ{s(41AwuIRv&a(z3?D4|*SSJuSLIyujRE+e1xbO**=w820V^hr#cxp8B_a=;##e z_TKb-!MfmufVqK=rlr6A5^BWbbaQzab?hbgZaCdx?=0PS4-Oq-J5_!ZfB8wnF;2(p z-p)p?*DX%DnhE2=EyTUfloxsXc=vuk&K$nr)MkSDY*xrPJs{^TW|)b)p3H5qQ3l|p z6VQFli_1qS@J}UvE)O3jz=(r>@6ua(jI>$|>Vg5GfR$R(M0qYD)GBSbdV5BuLgVhe zpdkAgR>#XsDTvLr-<}291by7UJ+?iopy2+U@W4@lrSBm+FuR$tDA9R#Jh5i|g;prEjew&~Q1$jZvXUp^Q9<`v)&Na=NB%O}0JdI?YiQtMlSVyA1HnwplE z3kyZ9;(~#==|(C(FBPxCi1G_%OyP>d5=45tpsO99$( zyhB+qW%5zT=!rNe2Up9{u26XbG%Pmw8*~c;G#uf+sqCQ+Nlnx|WJp{J!}DCx*OQlY}O)%izqAaA|7XY72P+B z(MbdBm{5Ms;1wx|$eVp7Y3{JyQJ3Pp#e93RJPCU?5++v1>O~{N;h2qFfBbG!k-a+Noc?GL-1hvk-kY$V^xVkrBo?>cJxT0mY278?NnKS0&Rwzc z+>n+v>HvtH{r&x33-)=IpYqC+dVC^67O{4ex5>NCErm%AaUdfrnFDx}fn@#gD64 z^ddt}QF?0j$w5;RA7O}kZ)_-a6jysg_)W))g)T6Uo=4)D+v-YQRWxl@Gb6y1sr=uJa%G|lLx8ENsj&c{1If%a z8$R;t)vGT=EE=sgbYlp4J>T__w-M37>#)HSvzYU?Gtg}=9tQLb^Ei*;GKal_vJhmN z=P3?Dn+av-#iy#lm%o>njl$gC4s@)ntZb31$)A9rFQrRDz`qDAId`ga^+f-%el74)^cnd?D+o+r!0pws9k4@TZ5`gB zl?8|0&8Zl5+WN4Eka|%5Sg-MunR+huOh9@sf0?~#9SUOt{QMQ36LpP|yp+m{@Pf6C z4TlOy_`xs&4N&Txq2AtJ6za?ka(Ksf!XX7m$msH3!j0qA12-X$9t3;?Gm{67%r~!hz%c{5( zlbGG&yY}|>seJQQvJP5Rqy6C0_on;k=`}Rh@~TDNT0=p2YO0Z^eJ|YDr#$7a2NaLK zY5V$n{_)7h6<&2lN-+LSE(Y)-j0cq3+TFelEezg;q&BdkVvup0Ph@g&Wo6=WUy{O> 
zDl02(So@8Pcries1_(oYgMsj*OlAXND+FJ82t*63;xtS85%?D`ajy zH4p@$8C*+jyPLn}dvfeFJ<5*-TxhGJQ06A*-|oHw==g9xam)uOf69|KRO+_6aUyKe zI8?gi(1?k#F-i@-UI&&H8T+2n2CElgVd}J>P9V@TrJz$%dhV+q*~RSBl(qz!Zm2a# zZ35Shp~}z=v+FncNm{3!-K()!Y)Q#;M0m$tYgPFAD{@t7igSv?-Mer5S3aeGu>Cb{ zS^U}hNC0J_=1}j$f_p(^V^tIqgJQJb=K!3VL^~0BBJzLcBa=^q+}%x>!nU93^_k@< z^lKB(XlWS`m&B9$SAvfmvvWn>-vj1N_WQ-f74JD{8?B%9aM!Ig+#shmaf2t*d~-v} zWFnSKw9&5Jvzy&8ia}Gv9yRG~^%9JxM!d{Cv*LTlqr<>S)UIVa6@yH52^Qs@VsD(WdO43v+Zue^}A zy`0pOB-3V7+c-O~-ZmPqRXVua+`AS`y9^VT_sw3H!sd7}9p0vEb4X=%QL?wT{^(+< za+kiAM#BwxOC9W*RciB3-LJiJsdt8IEgyS6$!De>Elo~NPCS&^O~zOIhcux%Gn<9- z;dx38H92U`Z1J?%)w>Q{oZ|t6BkhJm8!6WElQ!tqqU9Kj3(j(L+1wQ7$mz3%LZPatMUAGq;?4CiBTO)S9_yc;b zWPq2vr1s7Os}rPMmZ${hnN5(nY<}7Poc5%ZP|X_+R+(Gx+Tz5E3@&I6B&}4CvFGnT zAxKHc3rO13YFb-aO{e9uE?2QffTiSK)kFh->fzKh?9Z|9x9VF3nD-QXp^snYp3uS9Jc3xE@&g zLK<=ygk5eQnlPjY=p9nH@3)Wbs+So+u}4B}RQD@Ar+x0T1}PK>8^56MXbg~aW2J*A zI0PjpSZS)U4d&A#AiK{r4zNWEpCi`A%;%s)HYglP?QqF{n4H`NO(2bLgD(=j#+NPz zQE3M@6H(E=@+#ufaa9`YYm~dYF3~O>33%EB$qn;0@T&=vc6N50oSbsIws5lZ0DO_;F~$mU-R{ov(wVSGkhPIT2Ji{v>;F$Bof+S1(GlEP+`=< zAtk|ZB}A;m(<0#LwS=}gEo))=s21pv;E9R*_?4(V>=#Qrro@$ruoDIc*;BC@K0#hy zUe3-sye*ga^dL8Zl){{2ik@Ag{oI$gu9^6=oo9!^zAxo!_kPv|r`)WU`hdQ>i?B{^kDV2@|2iV)%*UB4AiWxz!*3qN9cd`20eo8 z&N21y@K{)Vo#)5*+t?ey-3P_D4R&B3`Z;e^Ov|yXuU5JiB`h^8ZCiuxhb>QTPx1mN z?T#UV@lDFYl5SgVR~&P@ODUi{!8{y4fE%IkM#KV9@IjOB__i$D%0i7I)Hjd}O?u&O zN`RD3$WQg<^flYQ*kan#V=CJo8ZFeOswd(h<$>Do$)1DRVAnCgJr|eRrl`{uo>w2Y z^3~Xvf~DTW$TdQSK&?M&LOBzsng%|W3}`UDkQU%jT*$CB6>%t-lA=Mx z+R4dD5=o-H86&d$?OWWa#*kAD)nR**SJq6y2!Yrg_NXpkZ#Z0NS9!7+IK<7M8<|qS zvhu0$-0%8QokG*R1#v`cYim%Z`@I|)U*@0oE>}b)_|0DK6pC<8kvY!xv6{9?PbMO0 zRHAqTEa>B&ZSwi^XUMiFQ^F~l@6s{-Ca4%RDcskDf<^nW_^JpkEUaI-a-||bm_JB` zxclc)+ViI#+f^thv2m;z)C=dcN9c~_0J4`L6KMkT{Y3VOjXp`OO8BPGJnTO2Kd?S< z%k%<;?2*8QPWa;w)v;Yk4lT~x)x{i3W@Ye|;Y!s>{(+^IEsi6-B?91ZBI#zs(&z<`{zIEVd-0F z1xrT{7?d;`EraugwTet`#b7oEW<;&J*HmTAV#6wlsB(5sMaCuq##6$MmtOwCYy^)o zeGepZMMp)oZcac$fz%d$vUqrha7maYOR11=fmQF}#^=bcW6EQ%6clHTvMx?^=`1}+ 
zC46DwIC5Aw>|xGVr5CLVR4>F=YUg2wRa8Q?=uHtG9;UZGXy-B1diKBz;enDwq38{c z5wnh-kjK2Sim}1E^Q!6_eJl_hUuS~eojb6_3zvmHlqJjp8#&vme;9HEm~GcpDFsJIMOzwK3s$7gX}}yO-@0d=!Uo({mO2@U<^Z{#yX6OVi@=ApPk+X>&5XZN zW*&bOkYi~smpXHe!C`wzZEeNViJ|}HmXq3?-!3mNODDB+feQlH+c?h7y;n;ba3gJ1 z)8hoHO9=oH;Mm8+q8tfE?IKuzTk6)8jDSct5zvzxvtd4H@LUDL#d?|8jbbBE&g_mOXSu93Vm5@^Ja$Ggp#TM)7`QS{+PA2Ap( z`)y;UBSi4~m8l^3L*n{9@{G`h6P8RDXW~R8No?@@(!uFRuL7^End_=u^W^(9PS0TV zJn!a@JM^d{XabV1#W;YOF`~&7@&^s)=BJy}*kL1%&iXU zN0^RXKk&Gz&bd|}E>|naKmLpTEqBr@*8~`{Up7N`D073i z$(2@IL6Sxw1c?%Pd3ih0>YcZ%G#$MJzdp?7#mXspN)%=yBFQFU zOL9ct`Gm*E|GYgS1u+Ie@qQn*yuAGN>tjdgUW_WT&OlAQm)4)iUr~!>4=%}-1Np0n zvSM{rRTO;H`k74zGhLp z_^{cPiA59QnJgsGaDf}nA@g>w_dDp(a)R{NCPn?if#pG<4%Fu7EH`Bv{oVKxo5gO! zba{sejd*asC=_2i5*uh0fz5kXf;O*;LxELfV>(E%{}V?Z&78YUFT@W1<^7YD)|=7^ zp!fA38t$)S8tTBr*g&if257xmU{$$jV6cC9LvLc>XTAT=;~mrQ_|A@w)^Yzk zA`8Fhm7YGU%Z|AfRmhw(4}Dj49%!`c1iP>BKj z_ObISVtedo&+L-LGw0#7|8{puRpSLYy8#h|0Mey{vul-9rr*EwQy@?tY%>gjKvX?>{-YXY zT;q@+D;(53yEWf#?!7L;3-QnfFI`>kUnHH%_T`=1HYgNrh^&@UGr72ni;I$0cz2!0 z=5$*WD>kV83Ac9c&y}*d zATWWYWQ63C2&8;Wg1_2cK$Xq3&zP^5hR3YV+bV*H3P4|+44TMex6SLMx zMg5L9JBH|cD13{gk}?AkE;#@ThI8&qW9ae0D7UPKm%Nl1O8mqy< zN(NwN#^6MHHjpyGO3Ih~mFO{Md~oe$JkE|9B|Aw4rgOL3Z=aoPZq9r?KRw#Ha@!;( z&7RXCr=g*tu`va{)fZ<8KiVtv|LibTzy70Ua~vz@TgLcO(74+EXj$D~1>^a=QwXy* z!otcLA zY&QO+&V3!P?zbIZP{6y1U#Q%ObMEAytN9{PGR8`#X|23Vx)O0MD#HfyJRRW^`U{n5 zMTkz~$I|Ky5o$vP+XO4oSp%>~-&Wz8b)oM)1X^LrJo4ZPDQ1Atc_>CpswG%ADKIh~ z_uLZ?<2#(^?OLs=%s?<-Ts;4g{S}*^k2R;FO0itbdp6~GI1P*YbTy$+k)t({`xso`V1&o!ZGp)!YX}6={)B4R`kru5Uiv@&z}`~c zIXgRx9%}I&vknWLhG2@NghfTkyJt65JvR$=o5!uG{rg&T{`DHVr=5sO7R31zY-e|( z;^-4(Tw)EMC$#p3uBrV!d~gIYHEK>Kow5q~f=DE(FETp1D3c5Oq2@nZK?@K|zkc0} z?O0e?*xkK~<@xLuVU`2Pt`Gvf+Nh@_|xBhxUmL2Qa#nP?pS`X}B_6mmamf8;oM;)&*~x+?Z{ z+lvtOM7{LM%_@?X&-`n5e~9AAqYTQd!4#T|??^a0^D#D(TZvX@8*f)uRatp@)bFpS z{|;^Uz-g8MHCMnLQAc@pvkjpwACmP{S!phk!4cxZL0p8GTQO&s@T8Z$h*tL8z16Y2 zr;s`0p&E24dN`Dy<{QNz*QnOGyi3UREu%b%%;?BBhhDG3SgplRNofv<(Iel#M+ux! 
zU&YmA+d))c>ZCFp8{}*>+iYK7OH$#|!F9pFl4L_lULzuME@dL1r z0dUSrN-Q}(7~lxI4FsTzcRGRsCA${*|J@I2KeEfp)E^-8-WqLgY`pKq=9g5WdD0Q# z!|*z{)zK9aFE1}E7lHp^yR~?RhLS+$1=X-JrXwUsP_h*k@wK~9T_n+ef!r>ahZ+hC zfN#-PQg*&Y^z2#%;z<5AA|MlKuL`IC45nwbC%Tj0{rO2IAccwwEvnA^qN0buXVeD% z1E(+0#svV%Zr{#DIJ2cuejgeFh)y~VrKcEx)kTt9i^nQ0Y)mys z(R;Wo59L%;2uo-kY|bRMM=xAu_q^=xcC%0^U;B^fM~d3V5#&^;b)&YkAv-xUwxEulw1b?DVRf zz+DBz(9Kw4#5l9JLBx%0fVWT&`=9}lAoY-QqN{;#{`U3uDnFw^!!>b~Ek=>0tadM> zXd^%O_xGzK5QdHox*Vf>Oa^oU^I+n0d!+pBwSeL4@_pnxd-5{hC>(f2xU?dmvAnAT zCw3VHe7kK(2-}YyRRdDNwv^O$iUrcQk&!fXbab?|QITwhX*JWgzJ%>n^QaFjb`%+{ zkB^h%h4w_3u3SkpQq)_0wP@p0Y(TX_ks#Y*gxgORUZyRrs1`cGFa@`K`t)hg%O!u? zxkT-UYVL)LP+STxauAD2zOq|z?bZj;#7M!@Xqe`+aIOA4q0pz zJ$|tcBod$mdRgLFr`|j%jRI4`_IeD%L!|qZ(u@o%f6J|Nc6Mte#x&^z;!?D zhuaGW2Ul0u!V+kiQ(BjmRlei3H@lYJXQvNBLr8a{y2Id{4-zpnmo6d6GV~QbMKw0Y zO*QQ79PDN=GhaP%zCxAwQ9{PTr~HwBe=9oUz$_tDoV)#PiTP!ny3Uc2vE)c=Yp&Q1 zBz$mm&-F3&R6b(cV|BT$V-V?UZa&h?r2kP$aVYld3DGsn{Ibe#4dJFeseG^1Zf|tn z@mysjdo{!yV>#5>-*0|-Ex}l5fAQmKTvjxD##qSI5oyMGp%=@FkH!(j$#quN;b?aS z@VtMSi8d>jmE0`Ny5qYm%Jkm%@F~GC9n<}(UD141vcQ)Ws!y~k@5`v)y6Z2~)YFsV z>849h7;Wjj`gC#G&v9kQ` zcoAs{JaOn3(%GV;+)OgaRW30XYBip!SGGPWJd#txaoLS^kPf2<;Lf=nHfkbIZ6rBz z+4J$8NBf6DMA{i&9k}4j8VqN@8RiBF)YscpwhwILV}r*K0>=hW(by7ZOve{hjH;@< zN1@yIwq{kt!;Q?gk3XLp5P$B$WZX!~*j))JRGz$2pMP0Z z*Gb7h=JC$%vUV|ny~5|CYZ{Y*hK{60$-RtW73}*eK(zKvkZ@EwwSD8g8*V7y@zeH( z0`6P>B|VMT0rHCr3)xh%@&e+Sl5e|==z7`+ZW>2V7*)D!<|pHPGa~A=+34I~s1T+z ziY&3uU+VGV*YTh!N`5*e{%)^ap9olK5C(;0TSDwn4i2TJk)xw}5*yF^Kei?f#d{J0 zX)Bjp*9oZadpj?4`lnBj=#vb&u)7ut3JUuAiLS7XP@sWhGCuS*H4zN4MAFbIUj|Z@ z5*rrz#UZF!h*)LM`#X;Nf`^1(KmuarGU!9*tS@x&hib~E*$xe{#g{yWzLp6G?0 z-M63ROyPEb79_=Nc4=&MROMp!e|7!;Rq(6ftt>3kGc#qWa3cSly0R09 zrh2#4>~d!TsbT9=oEFz{e?o^B8?}9x;J+J?kPrn94r=liQ`9!OhPz2#;)<&=Fwa9#7j4%{(46~|m1GaKt zrOB`Srp{D9^k7%RuyW~ zge10X&HWoUd4Eq)zA72qACaZ@+F2JrSClA8RxFXgWEbfO8Fd?GrjMbgKPeFdrwUgl+$2+X-eey zYT?^D^zbchAo2@oQFD}R)ygOoN{sD&OPnDN##c8m_%5KgX-H1@R%T!L=ru3wJ7=Pc 
zHRIvUW~0T1H%aHN{p;iM>reh05tRPw?H~>Oe<)G0w?`Yw{xTq&hT5qj5WW5VPnu6a zFmG&ZBpooR#8~2kl%{qtK!+=Z_wBO#+$N>%e}8M_>liZ9|*le+M(MQnfYuI zV(q9(BaZOmNPXiZ^J60;ll5*-{tGZ@aqets6EdKSn+Y+ydwZm>WvbLnE_5>IFnoDs zg(^h$s(BaA_hM<`=a9XK#opdt9d!4F2p@NdUmYv+aCaXCfT^pyx4Ww(_)@46J-_hh zmutBQJw>rvbaHYs`1Q~BSCwQj+KXKR^`BnMt{{`jlk*`1D0IkzoV#~%y#EClr_Jfl z`ev^d{~pYIeK{Yh6U+7*+)cn_c4H$Oe4_lR+}*2dKnl}BL6RIUng5}0I8Pl+IY~P| z{U!SK2*E)w=K6b}sVSh6!w)EkeTy#+jA(fn4^LQFnB&Zssfh_gO-)U0?cwj=Ll8sn z4w=9cl$$FmH)yvM6%!jD8#_3eIrp2YcLR3|d{*BWeeQ&Zc~-=Usu?ToMKuGht>lJy z?%;5V?gk&IJ^1gh87}a4=#>L}<`WDz2ZP0a%snc=TK6XF@GyhIiq-^fp*7+PpYHPJ z=J~f9X92_M1qGD05%&8JYyDfqYdcqPWMOnSN<{*m>&hz`D&m*(H=3%r`dU6i}H-X=!O6KR%YAp{4!1?&x1sR3vvtqU-l9~fCT!$$wn{`n7MK~cvRkF#=jcSjorFQ7Zny#{enA3EiG9uQcnsF^}&EaQzuQ6FXZ(-Ny9_Y z_pToXVAB=_aG_WGYZHfuVwIVt{PTuma$^acv7esz7y58Ebjq6WF;RtRxE^}Qg4{OY zqd-a$0myX)e!$+;)je=CWnssf>AU~hT$*j0<4BO7eUwdH(JZpNyUU>)n4EFB-E61t zErNhOQo;WeoS7aJg(x34-E=V1e_UJttG*(R!kW1@$Lt8kV?+8yyFfd)3i%r0^zH>7 zdB?Nq&j^%sj?@WOJPCV~J(j@G*(cEs3({t2PlwJ8n(qXjyweM6wg~;ga?h?#`S`Br z7QOof4(DZhH03d)a98Y=Da($)tsdQ@!LH`dVaw58)ot_KRqnHd7!a6_l{|x6=Dp=)aQ-4X zy1i2|aL%)*EMo~<62*S+M-J}9X47pn0cG%-obLACcSC&eVa)w9eoH@+`Mcc8RB`Xj z|E}mi@p?^%l0EGq8cXo@@=A|u@Yv+a;#59b!Z@1w|J}ogIu<^3(FCY>S|4A-B% zB71gSD^a9$onK(m13~p+sr`okg*}v3)ZK<)P@bd@9|BdroS%84BNvA?mrPFi(j#@I zRi4F@))$9&C*E=VQ7!oWM8gIW2e~gI@^{tzJTA$A0Sg~&saEaQqi;0Oe?UEeoQP{p zkC z`S947zP>)sV7XT2+krU)>UrGV@9zSV^tQ^b=rk3*$5R|%E2KJb%kO>h+KkEZx3M6x zd^3Sopvd<&c@eeWaoN@%RMpkbw-djf9; zVMwEbsZTRPSWw~ynNWv&dg&+I^C-?QSTsMC=+-tyn|M9KL zZmh7V=!TS^fCrWkCF|2ie#6IUp@RWU%<n|II9zlo8@duJoWD2FF%^qOrlE-4gL+ zS=6Ju2yDYBDz0)GKJ9n1uAQg$RAJ-Ub>KNHP*|X}g^!O(Yru7RVx60?pI_fxX$UKHCryf5*=24YJ;6%7xXmMAM$#pBlTYe?%yT8>v{{Lla9A~0OB;gf^n zm6TieQd3j?kB8HsL@BMvR0G1J5J~U-6i^>n9?o+%ABN#(baZqk>s)@l(i1b}vz0M3 zTUIXEfqee_8K@08Ik|tvv;)dpz4G-iB;^E3Tx{CFTmhf)0~Mi}%-jdgbMZ6*oqarM zKFBW*-@`e-a5GW)?oT}TT!OM>W@XK%dQ5`~Z=lybR$!lQu+C#e#YJ2{#m~QA# zl%gRygY3V}yktkOEXv!O4YggB_qT=l8Lwdl=aq(ML7hG!A%X73Bb~C=As8-ARZXo0 
z<~;iRP3=`g`z16SyKiI>%mhb2B%vM^EHE}SwAkznEFLR0wL61?9ZxAg-W7G2JnT^s z)6p>EyJDmwmqFy4!@=aYUp&=#s)c_Wx}M`z;yU=^k5qLa9}|JCEsMNvYm_bh^@sZb zm1ctsDo?VEvnAoz!WchawsxFQ>`@!kI&LdgU^T#JF-VT@jopI^gP`8P? zaNL%Zb^N%z7taK5@~ky3nj3eyHv`g+Ww#XI=H>>^L?h7+z@7TYD$KcypPrEsIw*2^ zGMuG3@^%T|HR(YzBkua5y?iI8!ViTi!nuz#5plaiH0giGgrapy42d32!ybFkTILxc zhRD2140U>S8Cd7qv1|7)?bTCmtOI=lf+_yPhYuolqpIHTx*abwHM$v6Z7iPsJ!x%i zEvIbqRRazjGXRyqcAj72oGHsdU9vSuI)IX$m+5Ih!}0I_4@X^#n4Im! zL~|gz3u}sGxbL;mnI{2O4U>jS-%=Xn>0qq$J#lw)tMaNN@!(kCIw&9o{K^dIYHEM| z2k$#>!UX#t)d=7AYvpF`SU|NY^Zw^2H$vhb=_Smnffm`zJBb(TU6Bc`c(t>{uV1Q! zi(%T`-`CUA%gthx#>B8K0`e^`Ej`;ER_At5A_B>ih=@oBVn~w`RB<+hB_&PG&Hew( z%HTRJk%Y$-A+k%Y7UlUW1fUF)Tvu0@r5KGIb=jIzKxe1?`qcmEN2P7NSHv}Gy-dF@ zN1!ld&Grut949Jra&oxazPvO@yU?!cnl*!Ptu0N`s{e@E0^+7yVMyc zHo-FGfeRV0vZ7+xax7@;?TIMa0{G{$7+2!nLRFeGnPfUDafhWxuAsSqCRgqP z@dL<+kDq_WX9pqS_TCQ zAAYPCMd4kSdOiS+c=P<=KIjFb?CgGkRL9r%^YQlmkYs$)OlGwNI1n?@1F00}qwU4e z&``kO$m8L`!N4S)=H<%{(~Xp93oUIE6Q9{8!X1cc(%6NAuO_k!vV#3uWj+SQ$j$n0 zck+BancAd}4_7^~Q&6_rS?XO}T#N~K)5?+v;_au}VF2YT&gGycOj6R-Yu9AixP%k* z=!?P)a3%8P)W1Oj(`|khH1Q!W{D_KQ10+^l>K`Qx_g8Ng04o>@E9t71aOg2$!;O1h z$CPtK_NP9)aw&|v;(A6|*=Em&dxC<3VgVq#BV}(s&Uun3&xvKk3bCL;hG%so%@4Zd zD*QE$Gml$>?v|F7l@%9X@C0~)Y%TVtVqcn+<2-+DlG#&f8-6A#&U-CytL-O-1_lDI zu>~AE+$ViY<*)eou`nM4urNJqQfg|5-k*gsq(R6QKnuU^g-)rnt>_DnG@;+8@XMxi z{Bi{2GS4uHXR^(BZAPF>PS(6jMJ#K4!*@5E7Yx_~{yO-Xm)A9I&JG1q zIG0KEF8RfDkZ?H#%GSYnhh)h=2e@r#HXXijT2LGycRya6xW_|_1pRI3Cywgp*47|f zYHw?!-_fH^dT;m+x36*8TC(HO`-3mvRe_?5BfWbsi1gjF7!83p6=WTH(^#=P=4Vnn YSn=x2sJKAz9}on}3TpDDvKB%A2UoIut^fc4 literal 0 HcmV?d00001 diff --git a/doc/arch/cache/images/cache-multi-fragment.png b/doc/arch/cache/images/cache-multi-fragment.png new file mode 100644 index 0000000000000000000000000000000000000000..164d7d63f4b616a277b590afb43a84067703b67a GIT binary patch literal 47782 zcma&O2RPjA_AWexC@F-IMzm;?2*C(Kbipv%AX&H(?LHY{mZBhsXaz$1~QUwCRpM^j! 
zP!QvTe@QZW)C9iZy;6}DhvaqBEQ22iOvR955J*ADr4xNZ@H5Fv8O>J^2;XzuU%Xo$ zPr)~pg0hlgs!qDUPW>Ed7Cvo+3@3gSnA)jo5Qvd1Kz?YPign@nc{wK5h{NxOz*l*z z5qwqxJf3Y9=P*9!2(cf{;e59YM?5sDUpR+ZeX*hlzkHSHYQPUG!%Gi6@jeRj-;5)V z?`*6K9pCsl@;PC&GP~+?<>$(&{2hV*qGzvVSAM2ve9jI$zXVnX2}LBqa9?F$y-?iO zG^iPFuMibdIBxY27$F9?Qx7yg5)XV8MZCvC$Vr}*d`Kz%XgX5%BHrQ-SIQo`^9Rg~ z1_EK@1EWcbE9NGe&$E8Sg7-^ZK;qF;&|vDHi4CauW-*|9)pa!Pb`y)Q=J;Q^hnCQ| z<1Ep_4}mD@^(<7Wv5+p*W- zK|Jg)VDLqmoN}?fj-~6#_cG-rw8g`Y_3IyBvw|Q|ooTK#YF8i-V`4a=hs@>xbe--A zLL9#ELPHZV^7xXIX~}#_T%aLt#d^3Edw#?g-z#}|#|uHFN`C`+Iki-M)hcS+ha!t< zU%Z9C@Gb;`ru%0m!>w@)QnGjg;Wj`iI*M#Mbw@HVOMKp@v?K;4NB6jlP4A)9VDf}Nj2K%}M-tONlhsxnrtJkMc)?Uqq)k{dmQczH;>t&PP zDN!F?xr_zt2c)fFVCTx9dy>eHH zC0)*=it+qXjq_^oAs)NHe7|0J{hRj{;#pDkiNfCYqrFn<62JZ6((Bb87@3A-^iXtU z+!zIHGO4AIwBkWsRPK$ z;ctF>tXa1EA?-4C9+j!&;I)0cn7wJ9H4%`g)_Pi4I$ib*>_8V7T6yoD7{-IQ>JkIF zvAt}Q1*dkA?WoTKe*@koE(MP$d&d&%YdIw?x_A)Lx4=H88%@Zpnlo}riN$2Yu;zLR z#`;n;`!sYekyq&?Q6EJy_4%R7oBLu~{sz|Cm)qa2Q2__O0UVfXdeaLX+6{INamU#= z89Q)hM!f&HGF8j;^b5+?HlIs3=D&gSk$jK!K+J1AURqnD5qx9i=vda7;r0GKo^uQ^ zWpgkD)7MxYQ%qc(wwBfx8-N4?I73w7cwRX$IH;zlH#IfI834@w5L|nb1I?4S4%_o> z6B83m_X~*udWeAAL*R991B<_g^Ya_=3*e4%ElzXRBaJ{XSt(w}c*c1>Zz@)R z-d4P<%F+u}Vks^w6FJ@e&W+M~8O0jVdotSFOYd5$u_)QKM!|g_{yjsn?pNv9JRTuE zZ4xX=UIu}}y)QI1)Tnp3j$p1w_}$E`BFEYJo27%sZ*;t8W8Ll}M_!+%#wwN?ehQy( z&?PR+eR7k5462?6n(u)oeJ|@QC&jP$` z_YOBF?cO%J@iCQLMPi!)ywQR3$}-Ds6-6j+fypn^64?IpiYyNWOKbNP0YiaZQGly8 z7AJudnmTg_jVTjnF&yFkztZmm7$l386pYia#1Dr0_p2YFBXi%5!MKqQY)%v| z=9?nw;>C;2fwx>-G5l|LGsgv#?g{2L&ekKtwWVun%WUq44Re%%Bq8Uz z@2=#-1e>Yn99!WT?rnq5INfAP{rG^b;^PdkH3PPRW%9=M=E&{@j!ThKSAc_|4gUzY z^aoek+#y%@NIygVMkPl4ZM`c9%>8)a7)S)*#(nh*`8At12m@Yty6XIEsQ6nM+G@B@ zts(~AHZ4CY=UMp7gstl1a+_b3k(Zx~jN-mL^tB$V3@Z4+d1Wg#r6bQ1fuZdZKT6FA z%=)l)EsF))gI%L%L9OmVn|jD(?-BX*C=NBBXPnNOqit7LzId@Vqs&EkBJRxP>lRAp zV3m^B9=e?jdbb8naWpkv{nzE#TOc~Jd&sa%>iIt)NEn_N4Q@L`bV#mRw_)meR@?Et zz5J=y!%Sg!MMil10!YnC&%1||WK_y?HV1~IH+0YT{HmH%+SmQ`Yz3Nmn__;PwuqeZ 
z$s&+t8i?h*qtU|Q^0a|9mCD(CScu}`Xn*v=v0nwIFoMcnpf5B*+(!JNqJC7rZNhG! zI_h#k{C2-V;%h2kHh4I6m>)tvbK6sxDKIZSd=}32Ze`A7#&OK5i!{I`8OEGkQ6bRU zb&&RP{+l$a){U`g z8JCQ*Z&f^ur7fgl>?wiGv*eKNP_Cwh+?!uzrC5h?a^C70_r63LLB)wwwl8!Qf)g@b z#NA^B+*qumqfY3#lhtiCUdw!$DW2|!%_W*@O)WJCIUliT4=fgI*b>|pDAF&n{c>eJ zpE_b6CU;G?3j1Xc>bBBSib;VtRba$1De5R<=l+%fXbDEVl^0E)4Ov+_~X0JPMX zafL-8zBLayVx60j86hl=`cXlvsUM2|VvHV+YO_RISXjK{U7#wksDH7_COF6oBOS9R z)mBWVm|@z+r$V7L{HKSp05`V5-`G$&)FP+L)<_v?NYGRkrb64;af<8Yz9Wr^Pa15@ z#LcHx>%k@8NXL9Ha8SOyO++@ZM=?o1W-tZ}(Ms?i7XISwDKVQ;&`ZgzEbp!~iE|{B zhg!^f3d(Y%1s(#D$_&8)Bn_tUfTJT2;Hprtvo(n$GEoF7%hvhO6(V?Gw#gkzsYVQkX^GbTr6rmER?s$Q_ zs9tnD0DB?~J_68+KOX#TJiYE@B&*)Aq3lT0RblUmy8hl1GCoz;3jK`1LXUD&0eCvJ zoY!Y9Ej@h!8)x%5JG<)ac$ND0?IlME3mM@wfYtyqCnP0S{BW2IhXxN&`heFLN=g9E zxR3$f0RjU08ejxT1b>cH8|Ak3Jp$%86vYb`H{FY#oSZy9K2DVm=`VhO05HAeh^?rI zWwF8qCxGN&bTBj+6d=Ai8x0s2y!2UFaPb2%P7JfV`@ds7!$)jqV%2;_Vs%)!2Nr$t zIVzY&pufLgT%5k}ST@l#!TE!{6bLZ5Kmno|SU(721ClMr@rrOvy-2v#M$3T z7Ier6hW@wb_M@@PJ;J9?5wq*Y;5J@Zv;!iuvbwr_KRs(cO53JlZ%8}8?(Fy@?OR`j zjMx(2PkzJNWVCw(tP=E_O#$tE(_ussEqhj##_9-Qv%W#U{%9Y?}MgY5=(J)xd<=q_B!J&CN& z&h1K7fPZxj^>=hjSJmf%&A7^%!d**BmM?K>@=iJv_kDs`;8K&GQ88bK z*|0UYqPYd0cUA}TGdeYmtLm^v|Gc=fIZBvcRdk623IQ=EB_WMP$jMzYCuP3Qq2o0D z6>o;k;beI=h;OoWU^>LTdKnwHl&wm^nf58>OI1z5@9H+9bJS4wS8IEvRE+0LFFrrp;; z$*4HB{X8ixl6%8mfuSaqHutYCi>utpDYq5wi75N0iMwG6T@Kba-u)CD&hQ%=Qa3}4 zhhhhYhXXDskhF&f2<>$8z%vp=+!H#zMh6B4e0S&DBC_Hy`SJBD$R9p1Q{0Xap1oe& zt{#KU9P8oM(9$AxkMC*pJHcxUG+bL2W;g$|%kaD}(>&SWh@b9T_Naub#@prl@~9tt zThHT{m04Za98*4hk*w3N?T6|!MnH8Z13N(w*XUE?tD}bJt-$!oV)VX0zpF0 z$#7|TWhF%~aB@=jp7UBk%VR;o<<|495cf28=2it6F|&RauWOHIKNeF`M&m%Zg~ycu z9j3UWoDD;ugw#<>Rr{iJZ~9yEU{)mQi+0JdAq&!BXviS+I8PmQv-Ib&lcm3gBo`oR z|0Xo{mhG#Ywb9{b+9L&Sr^kS!e(z*-ye3JC>@;E>)^k4%yDQD+vb)q56?GjEN-m!$ z>^j#RlwZ9~sx~-0Oegpz5IP@gnxI)=;DfXGPcZ;A0{oc{N?=&*mp!N^mt9KrBb-$V zF3k2qqMub`}60| zL4x2+@<1Oed*#iqQC`z-nr(`$0weAV7Ky(D$rye$5`L!?1jMLO#ozBC^bVGlv?3VC zc&9tquFEUHV`7{bx*7=&D5(+ZMikM#p76f|k7n`9BTLTexNwatNWYJlZg9X~HJXOB 
zNy#IVQ;{6v7w;Cb41`}^1pC8rTflO0avx|33y~-tHL{?e1K;`2+shVn{cv)-+x}(6Z>We~8P5CK=L?05e@x zSqadR=;B2-xqeZ~pGP}C+b*-MX2EQ5lgju}gGq4-LBjuGQXJ?1D_tSJ(T3xne`vV9 z8DhuNnDcb@SN&YAvKr4p=30W_NB4#942@*2^~pOtb@r#_${+M~+!Kd{m2xb6u#%$g z?&rYCtE#G`qD*#Q{p?=;2*nsMf?ES2>Yp)L2gB_?63|nt=45rEFxWY9uj+U3j9$gu z!~0Cgkw-iCcLyyrMLTWc&%NkQrb~u(Q_Y{QGqQOD>nScSj)`s$zppg=T~Q?6+l*eO z2Af%hJ+weL2Ay6n_Wmn2Vwt*s9+9u@!&VlH9)XE*LG0g=rWftA6D90E-&OAgPq&M1 zU$1mMbX=eHdAipwOUqxy@v|a8qy+|4oZ0lZOmjT$r z&B6PU49skn>Qpxm#!J-Wcb8XJQ*V_O6`^uSJ;vh&UNzfgt+6J)6MywUz}sw^o(f|> zo0qSMX{3X<+DN+SOmqePCK-nAT(oU$wCRWL?Az#BnsYrvy)iFyFyw2y(Q}vSu;Ja| z6*EFT?aB!Fg5}C1+eM@~>)OLaL7B|BYS~*`(*H!rbl9utvu)E1(vT}t*KJn=S6p)H zS6paqGLP*W6ta!+?0W38V7pTfyGm+mY9d$_Crhp4VqD)-KSlt=NOM?BFNAyHAfCmo z?`y>Ry3GnR&zTI!kAV&2{5XkN1{El~t$wsN(#kH&X?oC@&ndE~fdrWe?;>o}!vFEI zM0m=&RL6Q`oz=+j{2Y;9Bwv#2JI`{m2d^mh<&6sB4f3+tkjfcuU{A>~dY=C#Il*#f z<|&NGYd5@Aw4nI!ou6rjr5~M)SGw0s&ULt!_iBfm<;Gw)A;(R|vDIzpfZ82w|>|3$SV zP>Vg!x5pJHWn{~Fr9^i~P7f3d851IiICRRt7&Q4WE}Ga8k_q5NlB3D5cy9JekS#o- z0x34m#{t`dvEhHjW)4EH4nu46X0?zG)axs3{M&c$a9z9uYhU9r>Wu)^iHN`#;TtjSP8B=|N5W z@$uW8yALA0%2i8KQ@vO;+I1@(mQRj$;2GhRz89rJ8&v-TGAc^&v^2IxE&r=}$gnR( zPi~K^u$RYcw5mY~qbbQ^!p*W_EdLE}mhC<6?--fw4hr|HF^o59Xspc5lLZ7@S=HMi zm@j)(?cp@Ns)%6mg%q)yC-PDnGP?i7Z_hZt|E8{heLiFc0!Js%2#q}xv1{WMJICn0 zp`q)NC<3@CpIxh#k+MMl)Rdy9fv%)HWX)$Y!l7Nf@ zIK{E=|H2gwq~>__$q%`Wbf!`D*91DqK7luDH@jx-SUT3%bTGBYE;@zUsqT^9T4z!H^m)Z)g@&z|oCGOy~)$muGx{(%5- zX1KuiPvz&&5qb*x5rInr@#g*ip1tV0JWVwV*6`|{Qga38E`dw=6QT)0L^S;H7m47y}3mK?gTw-Ca* zLrY~lDhj&AJDy}cH#+jS?G88f%}9NzaWd6CC6w=_74_WN@8)M$<_7 zG5@PYc@&EB)S)r# zL&X*&Uc7kWL6DxE-BGO?MX)g26u@*p1*pgbK$33v7Q$?2e?v=>zzd0$x&?DdSd}0A`5U*bGSj*^KkEoY3#)D`QfTa*% z=wF$)b1G&qt=E{k!T?YcCD+yp*P5~i$Qd_%41Afz4xtzS`&POxX&t*eKV~ktM}lPN z-ZWj>-P>z>CwckuWuS@2Mz{Arqk#59b?#jPb*(=uv1l)hcH4S`Fi^AP#)yYXI5;=} zhT^Lora z#GaWvFewFq@)3!5o!%zur(PvMApKUIE`5HgikMAUFdu+=dWQrj^Jhrtl)n^n|FL~T z@0}RQpJ}+jG|Y_>7V0?~V&ALTRp$HA6>{!JMJSlxfCZ^oi+{Cbjw&)iNK@yYT2`NV;A%qE3!P4#=Kda5o 
z4mhh2v7h&4-)1bsp3G*jq9}47SoY_@8bM74h59Z{=jx5KNI!6=S=jvid;rr`Rb$mA z^&vEX3a|J7bbHIq9Oi~(XjNUW+Njk7$}=}8(UE{^32fm zgV2&JaG#_nX>)W_5|MZidQeRZeGHVS@8jd+0|TnPG$csBcyP`AiVUq?Q^3hs0DwsO z3i`#C8FKpSVET*aM>3{lWXEgRc`)L=ir(k~UpgXS3ix1Co^sI~d;9we2|^>)5gN&& zC<9M7xY50Ymg^A6>_rTIxb*l+F-UdJ9HXUtE+G9PLFRhQ)pi-w9!bqeWhk~ro?-CK zN28>g!hs<5FI~e_83n3KM#4j28U)ZsnHqq^Bg2j_oWMuii!M)g#U?(Bq2dG;Tu`xQ zguaB_}7BTxNM6J&xHv4)_z%``M?qCS5jM&yIfD-zfTU=*w;M z`yvFVJComC{56W$)Es(;*|tG=!kMdvu`#`hrM0yQx;~wz5di8O9349@L+B#`1nb0r zal5FZ|Ecl6)-)rvTkO2W9VxG;;Ij6k0K^iooHPm$Y!c4S&NgvNiO2MBuk3ii{08qo zLVv`PONY=}o12HlUIv~)e+ouM`;L|H^6`DI8!gOK{f88;&;3s!yh8@%YfTf|b{iD% z`hl5C;%))d-WaQ&YbVe5q8AAtoc>#}4*5&5&MgJ0rrJRcCo8hTVYwgVc!tNj>f@m9 zCj-m6h<*Pzw%ZY~5P=&q)rUb!s)mO7B|gO1e|}A%ca^U1Eh4IXIg8iA#ppJg0ia{D zI~F9k6m=ipf~<<)edN-=b4Q!VUnUs)7%;fHb=(ovu|)|xy$N4;-OLASTKAJJ5INeO zcM1dl(R?=!cI&Z=s>X(<$%rgxEwG3aJ`5){c<~}BIa$BSp9-eA*^~Y^^!;CRLz6BH z$P6d!J01Y1H03#q)x{GajrH#|A_9flW!6Rugko-TF8WKnhIUqN`U?T|EOk^&!>0I< z=SYy2e_sn^#KT0rkkTH?O{Iy(w>N9|be0Pd=23ETc=x*_NKgt>-I%G%KnfGnEc*Pl z{AXUtY+4T>azMug-T?f29F%4q`HDcKK~Jyq_syol7UOPBeuBs(yd1{*ikMbpf^tEO zM~wS82Ae6aK46hsy*se@*qlJ62~=KjO2NtskZlFb)au@i2G|R#3*2vx9J3R-CbaPgXVW)w5FuH^lT56 z5#F(59q>kM58Qnn{tpFv^E>n#IFTY37PeP(3f(UKA8X9}T%ud5s3N&9k#ATCHPp35 zSrM!Tc$zrZnhrzCCAc03p&KVU?9KiCoLNAW1aC;zu(ksQ8Kwktezay#%EFzZMG5Oy=G9OX7TIJ16gDH zkjtV&zSC)n^u;1_p(IQ023rGPb50Z!6;TRlOzXNsw1Dmo1iF#(^74X1POmGlwKH03 zXd|@rS8Kq!|Dq5KPA5=DSolj&6>~+(FU1+9?{id(g^aWhO$~8xfD^r9M*7lVGVg)* z@GSr-iRXush!lF{wO&O@otp#WQt}OdDD*)fDWHD- z!s@2x zFl%l9^uRMh4t2e_+J5&_z2Wzz{LWF<{`<9-H>a%sQW5>+Pz2Z8&pBrIxZ36J_ZihETzWc*goIjyY2cLZXXDqsO@-=eUcZ^!1$^Q3sK-63 zr6T}v4p3(S)Is1?NWm2uTU?6S0_hDo-;cn(vt(DE?ri2EIVd$QYx~|nK zC(65m%AygI`IF{&oe46+EP?6w+YJ0!HZk!q_be6kXUs<^&TsW6Ii~=oS5(CF3b!g; zCGxEBMuKbh=8wZqpJONPjk&gLZj5(b3bM;C?K<&SS5wNVc6r_gm!daH-z{5{JO5=? 
z!z)#%MjiKSL5MR*>^tF5uk+n^ll~(gZi<_Ku;sLPVcli;$^x?Y$*?LmUd_;Ys6e3T z0pu2wAZB}~i@3+>te{GW*XQg#w(P}m$fNr`o+PpW^2>IQ?%H0bO@jqhNgnq#=L1<% z>Tg-{{`$QVEOFxnazsV7-F#~p$TC{x?=igI5n67r63da$;Hg7|QMV+&Y z8(yqCdXe~Gah}+MhH$X(y+&*)Ns*uHtI`CBQBbE@`$n*p|KoKU&U1gij%Xo^>u33Q zN=?i|e$r{L_o4wc>UA&JC4-+9u_5lg2PgDmvOl%wGL-htR8$?UFIv7Q#rq>J7l2&7-L06@MpBSYiJ=u6};URI8Ml-C;N)t zO0ORWp|jyyzk~{cNjqYxmcC)|WJrx5@-7n{|#Yl<4m&2n{Fo7==WsIc;Yqz*N@5lS(&fk3xsyl4r z6cSlR34jdnKc!~#J}BNDx^R*ni5uYn#DeY!s+;uV@j!pY`oikoUWAC^PNVwq%Hi1| zXV6>OG&-8syaL{AMi}lDiz17*shd;BwSyf|)>x8|ZZxd!PuCyqs$ z-8+O89JaEFZpP5GLv!<~Ub87DE*6f9X1m1%& zi6p@&eJSLAL{ccWRi0x04YXbxHD6EkanY59Y-QOCabbY*!<%83;F5CvH~Oxf22Z*k zYsG#3RiGx58ED&YNylHxy03J9)lSUhhPDg2Aa0USEV~45j@Vu(K8At_WN#0SDMbgO z*`-Yo3cAt^+qu{;SqW3V>gAAO2!hRd8djy6Nn_c+Qi4fwA*9n~axRF9Hp9&RAd*!; zzE5VtgEQN6r@{m`Dlg33vNTalP}Y4Y8#;R5KHzHkTV!vs})7BGO;bP0P7~aQAH&d3sE%Z zcoDgi_S3@OlPd6Kj$?UOeS^R|UZ2q~&9u)&Vehe$3MT3(VRxHL^*jkvlcOMHHjs%3 zIj+YG=~9uvnQcHazx;szF#VpNjtKR*GI8P{6thgoM4^kR5ArZ{f0;BCk>$xjde3@) z{z37+j-#YHbzg%(Jf>dom6KdPM-tk}+<)JYLWEg~c%%<)T1GE4cVwN*gFPDq+RAVD zv!3&?mTboNSTyAAd2W6_+4k3Z$-Kk|t;)o7fL}OO^QI59%S34ji=!Ek%aTsbJ7eq3 zUE3sP2y=dyefFAb2L}fo)nw^)Zly|pS^BmmDT;WHf!*3Uh_l#im1CgEO7=^Huh6%T zg~ue+1tkveEIJb?gN*$~iGE4wcZZ=wLG<|GaHB$C1&lIz1-eQP!QHqqKMEnp&bf`zbVr`@V&9aAaiB@N1v$AA_r^g=xY zy(jO`v_2;1bw;M6hkLI7WFt8J0EC2M(8C}+cPZ=(xBdDItpVwkXhON$IZ*2C!2#Nf z*}cv4)J3C|_SAX!!M|4STmGiWmQDdMnJ{uwdt7+3{9RkB(CJcn_W8lDeXSmi;`xqq z%a299Jf3Cj+zIXuw?Kh80Vhle7EbNFkb84#)fX%dCY^_3Vd_^X1Js{$5UaCnEKgZ*zkROylWlxUQH51=uwn4o`S){l2x{>`*J!q_ z_7C^51@5~S3;2e#F<#QtJtEam3_iuT9#zB}eZ6h5xbA z&5t4JL0NoB(7{a;Zy5c4(CB^(uRUpNF!sdZxqP4dnBH^OF?{Wr!aCksq0+>RHlF6G zNBb6Y`B4taZ$JQy^Q)T>y8>e5sbZO*uHc#9GiATr0*hPPLg$+XKo%pllggbt^3>fJ z=%egcCI;G0{KL8vO!tp2Y++j<_T9dNi%!9=ZJ_PM=jiI)hPuk*kFRof%gz>8I78h0 zOv~I?^RJ7@_*Dedo$!ric^_K@>W!X_c%}XdQtS9J8{4&0_j|$)05s6BBtgB$>1&}( zi?q{UyA>CxEJcUazT))5o#58WkJ;aILAZz&c{X$)qAfnV?W$aLs@;63E&cp7U752j z$84ndP);bh1ih7%T40e?<_CA%oA!*eWPRh{0Io|Vl$j3>_Z6(m)hI~u6SN3I;)Ac9wr2TI`>nnL@ 
z-`KO$msTyTN{y)f8Qk{1R)5Gke`MT_{+Z~mOUnsCTty!3UOZjZJAVt(Es%GGr*n}< zrsA}{zmU6@Xy#Vk=;LVk!r(K#nz>*>gHN=O)+;rF5vgkvRnHa;A;I`6SN{Ubx+e~N z^*q5gu@ac=K54qwcJmAulIBC1L+C_mD=IV>Q!fVN15J$CzAzQ$+Hozx&Y{C98J75N zTUJ6sBJ^i2m=)B)KYjYd^*jz=8ZVZ<4*Np{vX+vusU1Cklk322l(}lCB_!a-d#8T{ z6)YAA0NC1E+6*^QMo>uTvz-Q#8{4%Lv34MVzaIL#)y_eVL zYZjf-ThKeQH`e!bPmWj`Z=_xVg?5sO;UX4PGN9MD_-ZQ&x|N4IPs_zDH4x@=nf7NB z7HQ<#*X9SHrY()F9{{l{G<(wk#{^zWC~*GnT8W(=AY))q(uv3`vHm%cPtj!E&EKI1qzkEnXcfmA}xi{5$3%w;r zh$X&5??p9iaf}X8vhrZIqw7uQ1_gl_989p>h^mVt%5d8{Dpp%`yT~=NLrJVA#=xsnQaWR82k1{dPpz*++ zk}Lr|ZGAoJ){Uko_ihE_7Y1ZtJsygp+Hb`l;}rmfq8B`*z#|-r4b5qlhfK54e-}kD z0##rUY|hsYu+BkHTq1kA?{!-q)FDi^@ucxIi9i(YMRRAwE1c?7J1NT>e%q=x!Iu=5 zmX_AjOYEF)+|o1_X$+A!fwZy!M*y)*#Q4?9&*h9nEQzAL3Eq2bqa=_|HcbzF6MZ@l z$Lv6M2~Smm$EqDNtJsCgYu0#SZ1fuN$11nrMkIYEtX zJXn3TrMYO`eAUl6mH)U|I<++Tn$v6RY|TtH!5OvPCYOUobti6`UNkA=YKEIXV7{7l zd}oOnU5CTy#%^$X@K>mwvex zY?+o+B|SGPc>NMQeYSNyE3vQ-szUTu`gi{MF=s`<7SPR{PWOgG?g*BaXRIvsD(#-F zI+>{^$0X{1NcGs3;P*egt%S-H&5`kCzHA}6$->xXzjh>;7i?0piN_=N%hIxhYvdvg zsUhs*1`AK{HOZ_BO=Zjd?--Z$grh>Oo}yKq}~XCkcY)$n-KB z0hKr^g%G3CXS6y%22WR7dP!x($|5y>RRomqlxPnHNb&xxRw^PFB{BnFP(m;+U~u^i zDEZFLvvBTqT%U>LfG~+)m1bZZMAL4>r4Y=cSDURy2rw$kMoWJNB^`urolzfE&{CeH zk`yY~NHeJH%3TG`FcRuu$-U^27R3Y5rZUW_kb%8?^=iMZcuw{mP(d``eoQ*ECD-jB z_9R|$e6y4N+QAFTm+4>D>sZEQ`o64xeM+cmT$PH^&7V*X3KFduxt=*t!oXW>wRrp6 zO46gx`y=WwX}@b`q^+Gdo6hzrrXyJvH_2=N8IE!ZODT znoQ-XZiOpRi-yn3%WL-%3lGG|+mps;a|l|oo}^A& z3`;b5=cCKiA9You%jtc4FnwlvDIt=s{JHOj>RJJVI>UF}yD?AVLvAt&Jq-YY{1=&{ zEKn@lJ{|)#l+hthge0ml*SsrRu`$l}9o>qZM7V09F*SrS8Kfun`Pj68f%TX>;A3Hn zC=mv@@zAR6lQ7VY936=D%{U45Q>i4teMr#b=T*v>5z)P>5AfaaqYQLuwXV4O!aWk&(^O9oBh^ z?4yg((bm>}d!@+-MlS*Vm(zUpa!5Fa>7t^#-~Q^l@ZRvu%-K9fU$w_^@Iz3)rGEw5 zK&4UtG6{z~o+d*eP$qD(4MNuMyuuofT_dB}ktVCF7}mV0iPlB!rb8*Ym5*T^Z??QH zp+s4Tfph(F%&&3KgkzFaSbpsYHUDB5b~k23f{!e>HaIt5wa3uBpj})kN~=9kf9bh> z^UCW>a#=9JH*28=DO}Fm^Pp?*_*!(Qq@6ygjL6I#ADKife;zuW-KC{pxLk+e`1yiC z-Q^n?{CBcrS&8Hr?w!H+317K}(ILq}ck=fKukz+uI+iZSJU*%14_?*qdY_Qc%#bQb 
zbLE}k-e|Z*I~s;+m0QX!N=;1#a`mJrmSCy9B`%sUL^ed5Y3t>QQTww*&QchD}JWq^tdYXJ>29YdoCyoI8RBP}5++ z4!qENDfZBM6sj{W*-v(&lJE9Htof$&ae-BKbqD4lNsT`O-aw9dG2z~|XCyZHbbjH% zw3YEdD@}YES*09B=_zfG+U@$c*OecJv=hMz=NZ-hI7sbR2Dp@QI(E~cr8PN-+Ue*+ zR)e&HaRA$A&FON-U!D|tag019b~E)pfKX-pddv!uXLNu9GMOVAC!xt(Q*;}Pmx9T{ zSL_?_*8UQ!w~v018_bx_E?R^)EdH_diFi;hSsq^)_4XJu$^Ej}07D|Jg`p9%!g42| zCXp=*>`{uNZtDq@@V$fw(LORyjQte{A83D+wGhD|@iEA%sIa;B!`^3?giJW{n+mB6 zPanXtA!y6xd{fQJQg!6!kSeE|gylW$&p9ZR_anB<);={B5}u#mJ=0&Ug?s-rOErmo zMdY0*X#gIa4-MIk$DKT9;wYtau^L%yKWqQ1r&r>NhjmwP9OQk{6To<;Lqkm%9ifc9 z7XIypM%m{09IU;&Vx?%!B5~$%;TMrgh#>LuO77xydkMEHrh+$0ZybI>Ry9{gvs1v6 zStn`p1B#>Yg@r437tXJK;Ii<_9%8+RKz%{utH{v=i#wT-CN>f-#&aZB8-tNwTu++j z{ha!`!jCOEkQa-lDB>CMwCYSUDAK)u6(?#B2q!>puz;NTD1v%kTT|MMoKJsTLDL@y5y*j72jkqsprN%W59$T zYF&jSGy+8cE=4|5D}fSKq7c~wT^MKrDZB#&-ZCg&F{BJYzk*~FUuJ(maSKzJmV6%c z9Og&b2OYmdmlj%n$HxPU>nwtb76SV*YnQWKL&&OnVqd@r(?Tiu|B7>@Y=H!dr{3T( z8qf#+#RCRjP5hr*$!o-lsV1629a0!V}#vk$q`G0od}B(@jltdC{NTP5voY0 z?SFrpS$hEM8&F>>jV{85*7c9Ug}rHb-XQf!lWDufasK}>|I1Woy_M{yU<%KnlDdfg z-O{MFs1#3?F=Z$kmz(2QfG8J}J)p2EY|KMwz>~x4qO627U$7s&nEVQDs2TVs>2)IR zOV~qv>l)BtgruZS80;+|BVHx&Di!gop#q3Cwnl%d`%82Jv3Sa$WlBor_z(OxgSDlB zwedw}W8r*!I5Fw6@3mnI|43uEWcq@KK2HVw&{uGEU*3|B-|iK29X03d-%8mH#-9Zb zwE3s);<_b#R1nCK=CE7joM#w=!G{H361o__7&Rbdk#;h^kFfnv*N9t?V3kK;7hCAV z0Y!e~NJ2QmIkY5IYvT>Ob~nbi9H)o0!*V}4 z=lb79*(SC3U!_Tp2PUP2-UjS~nI43+;VRz^%KBX`kT-JbRT>X)+rlG!2^LamJbX+- zOW-XY>*hL^n`LL4QSQ0mn6Cxkom*YLQ2T`5zn0y+N~zJUx5?>;=)>8U%Mz2vMT zng))ivC(lQBObosMJDX)Qb@%TVV~v z(ulb7GxyaBzdi0Y2N}c_ve$BmtrRA~9NI%e<>KkT;!!qd4 zTMKE7rx}t*})4<84Oa%MeWh<-Gs$ zYHuv)U`z&!%g2%m$EH{gwhjtB=7PiP7#Nl-jySh zIE~bw(=29)9I)Fp$=55a-q=oIw@Oa1T{k-m-oG0Nxh6i5q3Lu`EY**DwLi!3VUAiO zQNLAX>6TlC!ff(u?JeeBU+gK3$alzm#MM&kb%0E~io|hw-%pu;B1C|;37y~sc#cgj z8}~FNXzY}F-JzvKLX31aexa9Tu)Z`?^84}A+q-ILB`rgTmhryk)<+i2wJUcF18a!D z@!?JuA;tv!YZz@@bmzuT)|;>aD0eHUF-!j5rTa6|(ZHmN)b~vNi2a1osxp0F<%veb zqd+tXl`8M&QO%o{YtDriFckN3jeYfrAV-5D$dSoUlIglY_HRV%@l^&yyed^L(}6F3 
zE}90v(E81pS8~2s=jb4~0vS+O*WHe$8fPX%@IIknL7*!P`*e+WK8QJPLAn!FBFs#!*E;8(n{;cf8#aARYSmM9lVA1C=n=lbHxU%MNCy8cbiqvo2!NC4yQN*_jp z3eF5$BY2wh5Hy_nUxavrS%MK)!9$$Di148R0ziuOc(sI&j3D^v02!+j7Cb$(mBUvz69#%sFkTT^y1;Q3(i^q!YI z;f)^8P+%=Hhx>@&=f9I-Tfn*7lp6meGb?d5?+t0q5h1b0WJDGJKUCcy12jVWdXacs z9Y$|)(oCth6M|tJVnuj%(UJ*sbDaPf;Y9>#5~ry9mqg;#{i4t2==MJ@^mz z^4*`<=)C^@Aw802cVETvw{Db3bow!_p~LIyYN9ML|@sp z6v`@HY!p;f15kVNwRjmd>3)8yD10PIdxIRuQ6GK-Xw!>sex~s23Jo&#BXZQ#)OUg5fA~9Q0WwLK$Pwdkr;`gMU?JVx&%pS0bxKShwfBL8WE&BrKFJ#1-^ZO z_x(KYd;Ib*4~HxEwfEXGfrYR*m=#wiBKP>1OoOQC8&9EQvf0KN)neya927}vyyaVCw>p5#Jhz0?JX#5M!{n+3K zsXd7a9p>^k(Y#_tsR>c5!-rHiP!E0vdGw6JOv0hbaE}z zpPyWyf*OR#5O%`PIdwFE>F91tF6z0SJ;%1ZwtkfL>J`r};>8t2Rg%l>GKDiLN>zKj4!%<)#b?xz)&@sA;!+#E*lW7d(5^RTv< zr7w=VgDpgl+bt2uTz3eI>5TH~hXp!=gV(Pvq(wCiI*YEPS5_Xl-UdM)pg978;Qtx$ zkZ#$)u(%M0>_?(|$TIYO9gaC*zaohM0zyCs92P2ye5|ajCD*-|ZlGqX9@q4x{{BLK zAeAv6LJthZr!5LHQPFQYOzt3b{$0oxYr`c_BSTS zCnoN5zE05@$RA~7eZ%vhz3dpeQr$EO}S~>^zP8FTmI}Aq^|R5nM(-ojb%YEaS61d~MdB zS#x&!8zl6CgdWhUfUp7pl)@f^%+rNAd-Fnq$T}V&sh*ca^!DL;XWm5ZkXr)l>+rkl{_2S?{g zK9fOs)4PD{A8V%7t87EI2rMiaze*`i8(#>FF^D}zFuy_9><_&jnft{m88F>v!ICJ7 zG7Hv?QrO*{1nUh9)pk5k{jcIT5Y2ngMjc>HE%2u!1uN54*Td_6Uiwz-(n3T(=I zlkBgm9(OVz%rduETv8Ekdof7Ci7x$c5A4kZ5h@fB1GM;1$^W$!cp=`}Tj68#fa`^m zZT-*zw6CncVQOIbwC!>6OM24DCF@&mYr<88f0yPzrsLlk0S4npnoVC1uo$X-^Ku-U?5`i{lW)Ni^(%CI)GRa{+`Fu+ld@)rt!I9Y+Cq z7va1d`t00f^F3E`hW;Q@xVl%F^jH=s;L&8Rg)OuV-hfTDfw6v} zk2-;0NUQe0O)W?c`8I4myvWn7z5zrLqX0e!v`2oF!SE^1U-`M30_S{AwIq8!P_g<} zz)tk3^fXp3Y;#)Mp^XaYJIHS7ym4;%JZ9T?Rt>T2U>tK(A55+QM~;PmX?-C~&<6N5 zK*JhhfePqsAJ0&x!J$OwpV@L@7y&eKJRTjtH|rsagugDQF?efeVdB_R!(9Zqd59$^ zv2n%7-_QC1-BWoDaRimr+FgnSdL8Fuj2D?Xdwh;Gi|2IY2)!06*pv;Qxpv=viRdV7&02|G8gN{@e19lAlG*I%7F|YqA+1 zvi>%5?JxUrR@9o)K{0UJVHQB;_bDy#0VdeJCx4d>OslTz+!i!82Y?opAR3Myo)S=c zaqce!_kzVXbnWqe>8Pj;qtDU~kUhrU_T`ganifWwsp6hjM zpTF`4jz&&K5a92PN=Da!umsTg36uoGcsF+96g72*pZIyqlkG_P?ys<9?#G7;qOaE% z)XoV=4yioXT{WE+zt-5s-Za^gvj7S)H|%Tgc&!OKPwy`GM3Vw@0g@oye@&3oJA?=_ 
zQc}%g{g%V6xwE6^)-Q#w(~EUd3D-D&RIrH?a5to26nG?O_9J8?**=~pBoavxfM+$4 zJVp>d_S1Y?Q*eO%U5kj*?bwKQvrZm|{_pnu5?@B#Y#ILUePAY&0lz3RN^^878VkWc zD9}g>P)3Wc=AmQM@!z>lAHLBo+H-x|M43n>Bw_v4VBRO(WUBUELRp2mvK)kZB03QV zY%HMW1;DJ|eB6!y1&kfcN9Ueu$L=w_X{4V?cM8%vC1aMP{J`oS{E}luZiE-#2gb*1 zmc@OSpzuKj`=~~@J-0;B|4>V*zQ(m<+7=H6nPwUnH6GHFHQ!0?HH|AmrP|^xu-gxy zB<8-Fn=es*=0e$n=sGAV2Lbl}=^2pyz88(Mw|9Dd;qMx`0>fyyV(aC&rkOsLR!aka znyTwP3$ZS#{c~e!(6gyIWhjn*;u)nu2cbWY!pmq2-}!K1MLmC z!6>*6Co8rq;Brn1UMabowjoX~oMnqjijL>|3&~=88BT{Q9m{F)W){_py0Te8Y47r0 zgyJaYjg41W&~z2cQ?|{F-TNLXEk8`Vc(;=XlcMQ4Qd4wrVv@reLHN@NWXC~ei>$gK zTn5zIM&=we$eBdyRFKY>2(kS|9pg} zU!nT6^sYm#R_@sCpd9p&IbPRgON3FVmBF>4k-G7)bphQpT}Eu=?|iI?Dw$iXlfKSn z(Sbs_l}aD-@f~yuAKXRQB~C|XIL4tBP`~;ps+sx!b;!Y9iVjTH>h|gOS`^Act3M@9 zt58c&D<@O$iaXwwoAkh9XVU(}U#{%NS;3)~;J;uxW~`?DFshwB+dnkJO2y{B(|EDl zvBgW(uX+R51uIsmwaQ)+%=W7EB6BzMFjh>41bChR^ag-YjQ~E4xj$h{a>&Gwp9`}_ zJYKN=6<83F$9>=F3|!@CsclqhcZB_ARpKIk92Dkd59@)xMfbvM&9)-GrbQ_2!Ee2t zqu&g;;s*}o^j9r&hdCCl$9d8#9XEW%Vg&wueIM4Ke72tH2eW&liQIM}F6h$TQL=E=NcjF7 zY4cc?sa^~pDqgEtkvc8tnw28Sz#G=L?Z(G%HZrbOtgjUImdoda5G;)p99${}YW%(1 zy6=~{j1l$#d)~KF$ffS|*OE9|B(VQs*T&PmK`VIuIIIA$=Y9UK&n0j>0@+aX-!}n? 
zRL}FFFJHXxdQIY!&oug8pvv&+$CVwJ_7*d-mrc?}V0@yqvy77s6r)oWtH(SF_-B-J8x5^{v3J zWZWs3kDJyxv)OZ!_$#cvsBK@Gsfjv0Gr3gHF{@o*oYOLIn|I92(4KSvTrqzq7|_zN zbLR*<2qV|M(Ccqp{1ih5aEZFL$GWC+?!X{0zW3kt9kj^ND!Vys8h6{RS?N!2VQ**y z&=9o)hf_q0k&G*A@c0Rw|aa-FHVW%}izt+f~*MkF7hyIG)>I-n5-kB&z;8ol(@ZFa%*R06cYgU?5Mkm_BDmFzGSE z<>#-hN>A56UDrZsPIkB4)F%)q02lw*K6jb^{1hEEl{?avu=x1e_sNfe?;*$)C5 z%Q0Mf#%>qqD%T460@!(P()5;;X~$N701N*<&6I!#7PTiz!nXLdsPveRc?o5znr54XKXBlkEgvIFG4CTYvmzBS;@Gp@q-dCi1vzn#;MA=WBM!B)_K}W6Q zm)8Jf43;t#)KvUea5)}K5trqd;k(Mj9(W6}x%vX!sz76}1amd(?$dFE=1~zx7rWrf zv-{3&+&*iT<%S|(kme+#5(7apc&KMq2BvX>Yo_p?F~PB(nd6Gi2SarBuZsN{o7;0< z6~x|3hcoik&js~G*ZmV)GrpPzZ=2>%jA~Ct7mcbkFxhY$1jlQ#6m_H3pL8AX%dt}{ zq8#|zEX`Kw-E5n6tkHYydC?@8rF1f0&B>ClxDo{|9j@2brXg}Yo|-G=$K>M5D@U%S z>XL7+^!8!&MH3~S&-wqfRV42r>^>+zVICVcK_K(UqR@!KuTQ&jgfhC&>heGHY#P6< zDfZKK#=74AZ5?45NA%@|5xq@@PIX&vtE%Km0wJH%$QYkwu5B3|qIf-4VjBCPXlO}Q zW?j`{nyTnW-J1$S5S!8b+Uu4@{8x80ng#EK*sDYeBlJqqFRr+iB)3h-N~enFAB`CJ z5BBk|CQvm8`;K@I_FHDZvfJ>FS)j_!*oCneJy};rn&-+!roAqQs{?w)vi$;{3ad|_ zZ|HvGH_kZfJCwJ@o05@1)Ny@i=3W3s^*z4p(;TXG^In>BS;4%wLV{f13VR$#UKo79 zNQ*O_no$Z2X0SuJtmk83w*KjD1!12ZuA`2sBT%7SWiw{mhbxm}a>a%WaS@t=Ogl}} z+*{NvcrifxZZDoT>d(D;wC67K17&{HZ>R=bCT)*NQ_kGM>sAvZr=iFs=g8ArZ7nRcula%?w@Fn@jXy$(dDR<`0)kMG-Y=RAcu?PJ_yqf zI_yPxDBR}JTwaNk1f>OYqdbfF(IafH4%>N{Q|idm1ovwtqVz2Cw9Um0N`@@c(&0Hn z6Zi*lIR+6UxaVn{cDx^4EiN9OD(6OG)wxj)Y>{Yo5b%-H=yq|zAZwyj9#^*tF^}VV zIg(z>!J=J|z!Ab!7Rzx=8LB2is`niGq)uQ9; zBxYMf(8@azNi?M)qm_Zt6scgq;Pr&6_m!3u78Yz}dijGYyRW!;o|fQ$m_Rr-QVVOGL2`Zr z2cc&O@+<$B-rqMn8T1tf7F46L;_qEq;(|K4*><@Jhl0T;6^0mZ6MxgWi!^CJbB>SA zxKj!1!kYI`&64u6{>Um%v53>#QXF6`Us_v^V`W2CE2PWc)+-+qbU8Qeld?qZji;QY zuPqpWAM)Yc?4eAg7@JN}bE^hQN#O3+6tC)H6FF!sbRsiNEM}M-#i67MFy6 zj0BT@rGq7jBwLMu%(3l$vv_Z4Ept-e@#DiqEg9XydbURPWVI$Ym(5s>hrNkSm0tu) zj7wCn&zl;r*S`b1@WOLVfwP0SR6OJYoo5a}mb=j6`w(N9y`#~)YQOTHFU5|_U$bJ=lv*2WgYQ1)(;G3Qm zpIfm_39(FYf=Al@Wm988TOJU;5&Tbt1%yd9sg`LLg<0NyVl?ky6$JS@~q>fbm!0lHZfL~%}aWBRmFG&(f>6Z11r2TT6cmBTaRQ19bz8pWr 
zc1=E6p%Yg>Ip5>gXv@O+C6OMG3_FjU=z&Ue-UIBgqqKnOiMjhB_c=;Y5<98U-26}b zYO-GtqZuspUXd&qHxd5%H`uhF_rP7F>^^|Hk7m&3HUGc{Bk9~aq|ON& zTD_6*3a|ai(ADPE)I7=||I+u5+mqI~wV6Cf%t@bej(Ks1`GPqHF+&#qe{C9nB@{;w zsBU6Mu$OwQ1knxdXli$oUopzg=qzIL)r2aW%5h6y^{b zNpRIBd;ah=pZnD-Q4j!z9DIK^;Ne0K5!4o@f8)2`eB<;5A^!>K+E-e158p?+D29s= z0sS2<#LwPE+J+~FH8solHE;t`8GpPJ zDd%RIa>;>m7T5sGy$#WC0D7G5DsQkTz=MR!fke*Bf%LoYAhnJHIn)^6EBfP6@sxCR zkV{ewH_JD*?wHPp9K^jm+ zaK{e8?|E#?*yh<6P3FA&0*aj*1BRNSpY<${tZWC5L25xt{@VYtCQ?iR;CXDs%ic*& ze-NZR0;7QDc+Lo7P}Wem5%{71o$Yu7a`Ww~UkOq6YwQ0b><3ts#By$&m-wmM?^2Fk zwj_K`p68*>J?Tvmv6|JICkH{*70jza0eOUGbc^injy~489Jl zejB{0{_3%?fxKb)7^waN5yEIP_Q2ZWsXj^}_g%^f5Y0T_9zVE@OfCYQ1#n_Y#r$i; zK%AN#O7fPfzLV;XH}$U%wl1}TbX-KXNA!A}z=XoDOzXY>oN4=F4{8Bu#%yh(@>cim zzgrGy7$is!fM|~bh>|-~N@XpPnyGdV%I~w=5h`ZQfJ1DtiDfMAt&)%MCm&(ex5ws1 z*jlEFpJxC%=5doJ!2y2}6W#sgY+=ws2dX&6uc()xMk&ShI z$o*NAaIa58_x$L(K~Q)@tJi)qS`UK5(0zQV$D_`=$CF~v{f@%A?vrB|iu>@oFX_ zLFMVw-!x}iH2+BV-w2-F1X)&}50}yLE*!whm;CW5{xR)AWY|*iJWTfmagw-HSL!yh@-=~a3;m15X=}*iRpb2SOmtTuD2S%n{|M~kFoLbw}1Z#b|kvq z*8M4%)Zb8_0}vVK>$X%^eMw`=%e~N-A+M8X!F}jkdUUOBJ9c&O>+GwVRV%Mk`i(NH5a6*3~DsuF2>TDsyflGIZ&ZV5GBxmrYxc)1AnauQ#Lcfk#7r*{U|eN?U9>A7y-xY#8*Cbj0Yh#ZwVQ%5 zRQ+ny1d&a>EBn(`pt|&MY=Go4E;HfZAmtxWAB6U@lYq*%0fn{SRFt6zrPtB7=kwn$ zMcB=6zBH1w;n=o)AObNAq0at6S|B0X?KwP0ve8^32NnW?Tv!8V^|odv#tPI1&}cv-Y^vRn^FYiK89G% zV5rG2^oeL;V*G@L;*9t|OOT*8&)bKLk`f?q-;=>$zvwo7Kvly-kQxYv_;8?kfPSX` z&(E)?oR%iBhwCoE?BBVGW1^y<)+EvrHp07DcQ|3N8R-qoo2GKQAhb^+8?ibEXs6o# zeI!#m_Vr7!n@_^LK=%w7cP3U3&?(J$vvlv=R*(59wV6lt0QiOWCZ;i#}iy zX-TTaDPXh@J#m?JQ4*ioQujy{qYAHZ1SGFtGt;_!c^&GzQ#5031M{wNevsvC8a$ke z6(gG!jMykI@vh{u_-dV{V%$u--|8x_ZXszok{cuvIO5;FjToskU8UAHxetc=2Ghw6 z12*5{Al{M>=ZX=`v_f^O5h8=sQGcYTEWa&!W#dU))s)wjW~^w{#$~d!w8!op!=jMd zR}oCcva>8!TiwWFwmg576l`N33)cSDO`NgaJUFf||D_cur&LK5P9I%Dd2{tGusR_R7GMa_p#(nt|i>^5&>$N5OfZ=3+$$U_YDBM) z_-wB_%WRaGi#+2zsC@W|BfgLSi+&0f%z6}2{e!e;#cVxms1{(IV9hOz=O6G+pUaP% zZAZ8JZhHEVv{`UW<+2ZWdi(W4(^nfw24c7-ew$VNU_;$tIG-A9Y0&%Ohy3luLFxe+ 
z`ew0J`guvN8%F8|%u99^TohUQRu+~oE8N1ENXD?DKU^J(GGCv>1qB&mHPA+aiP}y_ zgSt7;(g%9`46s}vjiAqTFgXPX3xgMK!I+6J_IUG?jp|yfIiPBiK{J4P5E&AKx7$_n z0{OWKa-q;|a^hjtwcUhe|kvCdpB*SxQq zwjHQ`dOxtCc50#QpiLt1H$JDNO?&bzKxfF`)s#DPTU@3NujkqsP9!4FQ`mC~B_4K@ z0!ISu9yjhTM`Gne6k*I%;z|$QZs$+w91R8=Qmo7gJbX|@U~R2R zfjkXw1r)h!;ZH3z+GX3>uq>7 z9e%u7v7c*w%5Lg)t)iZCw3#P4FQR@w&t&Wj1_p4)+q06_1`)Z*xH9QCg&0V{arqUaskSO^5vo#^AC8o<{bnZ1{1UJ5#>@Wh>Irp-(u2 zzD_^!UF1)=KOao^yETnzS_b56A59kLQyO60U;^L}Xcs<)omksUp&B!vpcEDclJG${fI`%2dcVE`4A`iJS7Z?vOu?wHtZ2 zFqm~kEgsQwc&rg_bqRD1I~UN$SUc7>{9G-lzQNrUZ;f~1S=kyDDbZ?ckMPYY)bHjC zF!wHcw9TNXBSW$tnG#oQn)g~tk1d(POW zRj|XGu=V_gZgnEsS&VfO*bb&EnvN|j_r@IZ`SCkDx`d3RP&P85-N6I?>*Ia8cVWOO zIX^vv=r;BjGXSwvFzn(7*}i7zaE$+}D`i0mB4~W!3N}>?AV+{U715)_18_2nf!y4= z5`Lqn0g@-Mtj7@lBUCW`1ybQSU?YLu3@vzwJhF^njc7md_6kO&0CjB*%7wUwF{Q%L z;|s6za~P~0L|VMx0M_X)_U)&^Oevr^I`Jv9eZVDfia<}A8zQmF(SRR67-l{+$24l` z{~hOSduGc^E2Vy&k-Np*^xZ87_BYzq+5(QRom8m?5aT44-&{dwFA2zs)f3|Kll4}? z_?f6EEg`4yEkZrTmPcvuk6__^i#hRSnMV{6u0_l;GOJJi3~0VGqF~Ic1Kf?EvI4v~ z8{SBfPS2Kp*|y3`Q=2ZYvGTRtw(+jCEB{xK~U?{8^L9>`SlQJcB9a(2no2yV0v+9FCphuE8~UJ{-& zi_2f^&FPvPxLYR0o+aoXU6GGp-|fx*bpM@XaqK=o-j?>ct`s3GfKhYpd{RGxc7HIa z-hvIh76xjPFN-yTWq^GFMFIak`@lwl!T$XJyYMzY2cWqW1J8us@!zc)|LH^s{*3@u zBlze4Q>RB-J%D#~UhZQAypphF$W+h+Wu`-uldQmQgKULm6;IR0Ax^Sy6I!Rknd7R;!N<;P#A%nK!R|V$%{$d$SC3i)b z*w^!^m_pGr^~9l7$Se5rZO!Crev^66Rwhn}V8AW|!KKxMH016p-10Exrz|LQhk~Uw zQ=2`uM32$VXkxX0(ySSA9B^ z+%}cOtdRD?w2A+Rnq}S1uA{1rF4^tP%qi5MIY#l;_r8E zk?Z5}pgK)CbEMSmtI(!tgF~Nrn7)>NRK45AeIP~=7~EBC7&;_|XSKRPp7>*B*8FbU z%s|DMV?%E3kWbM%tci!5h1=C2*-47YAhCJzB$nPN-;3|Zps=w; z&?j;A&4Fk@qjvlVP|7O$B>e-tITM?-$UAp{C%Hc{`Dt8&6p0)+g10jwHNN{yj&t;BtNy z-28W0^YHGI^i~e|U0W+Asn>&=3cvQ(1**rfovby-Dcm@J%a=Mfy0TX6qZ-YAvp9)a zusm{D^Uf=EB!Jzxe!lH9;0?WZzwW-v{(uQf2Ox_7-cAa9K(Be*Lk7$L+je8O2%ak$ zOuA>Y5xEnknKK{~OR?+-e2{Z>_casFH5m{;Gxz5uqM&%xx_V>!)^<4z#{AC^j)j|5 zgMWL!!eHlLP9XSdz^}|ulEWC9ocl(5knJ~-qeD@sfe~fXHsv5n(3IxQSaE(%e$aZ( z`j06Sn0GQHU~!=5P$pWF*NHJs=`m*VCtbB7v-%iH3AIt}g__#htQV(r9q|^2mWa)E 
z7FC;|XPAW;*XFVWtVzSP&&2aWxQKg^t3V%If`kbM`Y>9Vo3CtcW|lognsU6y zCDgfQ>d(}?`=AeRINe6AxMGoX2xFg5j;pM0-|t2npRx5D=f0$P6S9xuYZxj7>|r}# zKE)t+FdEg+&;U5Tj+slWp<{^_yg*3>*pqT$pripa9_lQ6(Nj_epn2uxzX1ju!=!&t zu;@TgVhE8A^@95@l#qVgAEofl@qd-ltn`^uIFl}F_Ef6y2!}$kS;mZ{r0|bzDDDv* zZMT46#H*cj-EBcrPe_^j()?&$5 z5pmEvjUTy_GpUreWZ+C>Iz4jFPl)y27}evi;aJ{sP>f8cN_1dOwg%Pcy4t;wA`u?4 zvO*g?XyK`InU<9?Zb%TIAc(YcU;w1>AyL%CK$BT$ib78rnPEOc2si!4o21jzJ&X> zW$gEzh-7Mc34jPIr#;ASFn)n)*na;t~Wz5^{Q z*e&U!Z6)_5Amzt>RUD6Dr|vL|aQk&%1MGW7-W+|gYf1X+KNp3RLK=Xv;`ivYp@I`{LXXWJ!hI}@(PXy0{Wo|XQ9sF)1by}A*66RX~76<=QCcBk#l`zqbMlZ9^xf$GajdeB7s)6%SLHOnK3Bogj& zD!9~ugAQ^Eu3Q1)#I)Q?@>^S49UiW633dZqeO3M!Se7ViNaK`2N{|H8Bdd-0pVKk< zaquCy>)+#_odlZE^Al4>7f~#7t4;Lw&r`!^)@2*pY2Glyq*Nd;px-7i2d)Bu+K8X$ z?MttRK9J#VfOfQsao|~r;@@+c=m=^VAnh<&V@T3 zy|~q+%21D1DxZ3~TW)p9dzWx9z!hwH5~(XF`WYkPf@}F8()xhi9Z%VRkR-yB{*Mxf zM|k^O&4ugpziHC(&B4hJxOYR7LqCSurq>>p9@y|$bO0DD$+X+%z($pyr&$5tpai(= zW^fVRfFlFpXFK0cILiMU+F{^Lm|!C7Ku_8{P|i??m1%j4TL1Xs9bj`me=LYXp$yUb zU!YOZ3Ilxp!<$*~L+FBZKra3(qmK&{dc*m8)Tu_JXC#GzDI@_%NkP*DC!LRXjk}ZJ z2YGpbG~=QzDMPp_-|2x<9RqSfT0`T#kR}*|eH1<-#cBVXP{Vn@Wh22R>CVMHNuI~c zjI4+-Uv;UMRyJqV@%gO{-5vLHZi(`NrdTdT;O6-|k|=vVMt}w~Q|SWS?z>QF#0d)$ zm}5>9il-Hwsph>HAY>=valE_fb>`;M4sHT{bm4poh@&nWPlkg+r~;i(0y*XKlS&60 zL#)^|T(5ysz2gHQ%ml>}NFq@$P?qra9(Tr;^<0qRHxQ=s_6_2}3k&Fq@kai+bVhp# z@B{^@fa~;bb1`D;AQ$;!lY_4oz@5kxb?%jBX_wtZU1sa`Z}Qw7VaYWblPOemqkM26 zioni(I~tW`6@xeeuvfbXOIEi|Ww{ec41oDRXKIkqb*4l``37Q*?{3NeI5ntpvW17{}h9b?Fm_H*It zDu|B2@a_XdeJ;OWfOO{$NcivvY9jZ&RE(7eYZy6E^ivr-lADK@rW+1Pr zba#x+w4UdUvxoX4YE4h)&bhqnF+y1C!J&iZr(C3Y5)|)jI?V|ZizExE5+_C*0dZE% z=|1RqXp%Gy+@aSXsEjF0f^F>amGsmXgQMCsWUK3ycD~@}ib(W*URTivU~WdVToEZW zMs9AuM@vnNM6QmFbNSW|-&+z6C|UG6v%H&ZGn-qRpLa8aG$pwFwbf4OCq!vDX`jeo zGwyI0W5VnYT>qGP$VIvIDO|H6`-2~^$2XYLdZP7dB%=#odZl( z)lz2TpIZf>=fB7xD->u>yV&PK*b}6N21wL5ppJIr+94h$X<-ix+Ak+Le<$4@ZpEt2 zAyF=W;Xt1Y2VIxY^=X$4{ib3UqJKJL{M_8k8@hkXhrVuQ^UVHo&(vhOK#)~kig|2W zq$(Xn^vm|%Y|5fq+r18cb$mgcIMdo+qOPipURJVimjmOp%FP#Cb`KwZ@CjcKaxHny 
zjFG_r=#j<}u&;$c3C+IWi?6vsf+jR9M`>$Q2ILCuletd5DAZ4vcF zd+9hG_k6bdW7WW>nLl;9n^It9rGVT&H5u57U^f(z58(>AE29QaHE%cxGL@5&!9JnF z0o**1XT8!Dm7YG^+W{>#E$ZKN8v97rPm+@9N8hQbsC>u2Htx=lvsWxVZJ$WN)*E|y z{oXhMU;Fj=-e`^KHCDF8P-;&Hx49Te3c=3Cq*j)r=_0@!VPSII_>EvvsLVBK^sddH zwIu^Ud+BW6@!9S<>Xbq5GkesXw!tFLy|qZePdbr3k$?v5$-Jd8;7@d_$IIC2>!XMTwjM4cF4j~~mF0~PM- z-8Yx_SLl!wg`CcysZ@YDF|FtRHyN-geu4f2L~nse$ifiubpuNx#IGzD+_uyVLYcKV zhpQZO6`so<+oyF#6?UpyrM}J0EW0ZI7-x4UN|Ya4ft{y}8ZnUM|;>bPtMu(3-pc=zr0gJI&pm zTfd}CgGIU4S*_!(ev|MD&XsDRy>?IZ=1}1utnVVUzRB?@(wt^2i}sP{I-8E0qv7H8 zG_Ne#3rsnTC%@DM6UnW1j_H%~78$SQPO+o1P}S^_gVCTv*bm0LF6zKZro{v_WV#~d zV2%LFC%ravf6a(WfPcBQU}^3vDy7MMpYPgEi(--+i0}B`<2G8+Hy9VhY0lAVy;+**M<_j*c!kU8 z#S55^il@%!m)U7)lEz{`dZu51qpH(8m9HZ*-nktt+HUJyWLpxMhP%NGYRA$-JF z*>qG%yYue)m07o@&`sfm`yuyoj0+zEh^G5pWOB&T`g+E}qY;x{fc_2x3{K6fD5FQh zGRCuXfZ|o(L~eH3q!N59EdWWaPSMXcSRHU6T;{ug)*oFZ;$KGcgcR2LP2K3jSP!2l z?UPv1LLwyvdjOX6tq*8c5wC2}8vH>Khc$O>{(H=gV~jg8Df#d6+EZcazm|l5E%AJE zVEhRXK>9TL&y*mkgOaVYZQa)Kd}f!qS_ra19Ss==wfovseL|n5kdTtGGPeb&e#P6h zenI)?zOeXjhJ(DaKmhp`$7SAi@;LcWN?AzXP?fi%;l%YlQcy#p{Yi5#i>WxHmCWY$ zpyC~R7a!25%nr}snbK=C3E=LU01%A@=SDHYtr>&dDs&=qsWd7t=Fl`w;`1c3Eiu>tpMk9D_b7k^nU^OJeKk1=LUdGOe zKbCIx4VOTAV}nT&+A?$X+0Sg=#0mqG^b7MQfA44A$0~NCC-wJ>(R{HgS1#RF?Q`6H z@Vs1<5Mg&etGk+tn|Uj}IK1Ki?cNkRMh$DsE3?d znXO@3;;$e_^qkrq()W{RZ&l2%olJu2*-xO~To^SKFG_3K8>EK%-JV5Y#*|Uje$&{C z)kQ+?WBnUYW_ogRVnr#Z6nhN-<-T^BcLX^l$;k@+*5{=bj>CY!jdHgM34}gxZyHan zSLM~>rJ8T~Y9eqj`lC-irZdUq!EZm*jm|#%?bV$mEYDfcIg)GtsUOaG5CXgB)?)R%buYQVNX>xe$D&*4Z% zS3My6=Bmo-Is4?r9(z%;iYq&})KEU%Vuf zy!ETup*5nOhi6Gmk0Alox8!{Dh`trWd9;;5t3R3qOxorsE%IcEcq0t=!gT26Z|k%wdriIu@i3&OgRn?psvg+=w5D9;cuxq<`M* z414(WtTe<-4iz@FkY9bfYyuM@OYO?Q=LqNx?B@Nz$mC08~qdG(o#Y6yNU5)^S!Vm-+iX1 zn^^X#uWBO_3Fj`=Sg!Y1K9M!%&bSI~*ZS2h%x_UTpy5$`FG<^+198ZOX)E3gP_ZS# ztIFj?V4q^4;eyjFY&;Yioq9ny8f3$k#zFDC?Qh8PI@CyMOLE`^x1%`qg}mSAAKC5y zXQ38=4W8|77QEW~GHgV2N6r9(B{?>b&e05MmM5Uk87*Rj-M=a28Z~Tn7FI}t^MQL| zej}oRD5R2sHv-z1nzb%9{v7j 
zO-tqmyN@D2h@!ZpW00wpTOBE(2SKf_p92pM(f}NlpB5^z>H(AthL$e#8#bMqi&c5T zK5@Hh27RUXdtV~wH72oxaxg&=a3eU%$CsB32Jg=WYkhBoDd&MYgM2Gt#{Q!vGyDlI z(eq^}QeFcCTDL7m5gn2~a)R|kYO{hh5q-xN4;MHpvsKdBoIJX`WyQXA+JWTaSNjtI_guRx%G_Q6@{Ol}r1lUzKAf_hb5rEU#gJ-1(Jx{; zI4))9uj~0SjPQ!4CeqJKt-8|brHz2iy&{QeA=l8xR)P@+X^cL7ZfQ|}#xutIfxwky z!4T$8&3A1d5Rr@U(HU-)TZ}^gdZU#(25N|@bgRSV?kNiouS`7-W+?qMXHxI}vNMjo zu=;Hi+=b@XNVfnSr>WK7V$}_dXUXJdSQ*&_v|}S*y^vY z)t$7!9G3%eXtoz3dHab!qkG&=0||GlxqWIs_9uw^7Unc}YaXvh-Ev+I95d%|RX^DA4OMGXzMs*}4tNZL=eqWx%RoLHlcwnpwo(Xr7yJF_ zHPhhLI&y8U(8T~XHdo(!WTUM)LyA;)iQJx#ve}z99edzQF6Fj&Tu)4CQgx|29DPWg z#HRNL5U+V|zdLu+FQ}-lk?9~~`5st=bC?%!n0p#OdnUSyHHTs?>2RT{t2L+sCM&?4 zf^9}Xkj}J*YCNM_*!(G6zC9dJJH1osd8+%+BEQmo=t(Dm5Ih;}ZXH4Vh94>QI??pm z!#mkkmN(5hULuhC{Qk;RU&^S2X%O&Vq*#33HF9_56pXoAw%BhxWjoVk>i2l(uiou) zG?1vQfXp%kHD(Y4LoA>gqu}r8j6xU1rgp^ag?~IvwU|vuIZbGyk}uzR2-?AsW_RY&RCnd+K(&sb8gnEUVi3RZtf z>yaoAmR&lZ|7T47#GWXL=Pk2GfkV_Hm)gy|YsVAbt_!shsmhl64*DTT2A&{r?PnfG z{5?hU-5V8=BYKh8cjnnc)?n!MrDt_anLW2B zFwp3pI?(VA^ulc0+Ffae_!cBvV*}oM8LZH^8Lb=LP;&qUTlWWDJ>nnYf*@4a--j3aqTl>DE}qWdeM7?T zsH2REA}y@lA3Hn!?lk{B(-%jyChZxssTphJvCMjdXIKE3+{ zHREZJsugf|k3lA^-F+Xh4RBIo=LasWnd|*Mi~g6%z615sGi8s-$DZK-w#7^o@=d_& z)#?D2fPehq+sD9ycHbl3c|R#FLx7561Dz0NK}{g&kGllelaE5)T?b`_qe**^m2TGY zt{$P-a|)H_0W|;XiTVMCztb}AzKacNH$jUg8m~Xa6D=g~AjMaUz4*W^CGCAsdDblC zEIiK?T~2O(+BHWIwS_xQv&gI%Q`|rB@15p#46b! 
zE+~k{4^@)9U@hRPiI62NfCHK_(c`1V36ul``@DOY=k*8h+gK8if#@bqe>9T_LeWqS zsRM2KwbSRuib$P8`s^zAK~GaT=JrJN4Gatn(2W5iC_ciAszDDte5o^}i*;dX0|a0n zar_|3Pq5e}+3@cgk*Z?IzQ*k#Obed-&m4ynU2rpz5>h^+%lLcZQ*qyugvSAsH0b@d zqm>5eHmeN%=M=OEi#KOJ+k{gi9K?=_0qugRTyM`MuIq`4VDb!=kbh0sllpcPI(-T3 zgPhx_sA~e11)_i|Y&q!Qdm`RpE7Y75u*-%_Aw{(igj=)4Q>VjTI!}r#e-`^<19v#s z->l=*VL3>_yn~AKTuEE@my-2Ib3ukUWcfANeE%r2_5H_g^YVc%;HWuR=9S!dF2doe zcmja$&qNWo?&=CvRG*Ec*auTpR{-*;I{zVb|8u)`$chzof`qBX#Z;;3Nw8 zzkem!MjQ0KRI=`Zc0de&TR!S1U4yDYiGPlWVAKuX~)vy4G)27gsBnCtb>FpUQK6a(kc z(;?`RQ-FGxy?_m>-8!&5>t zA)#k40GlpJLH44kyid;cNsh8(nnwv*`HrS@9<`ww9U-L6Q<^qr*B_*wNp)}3hD(D6 zzg3y1088GP88i+v=`Q2y3*$Y8~|hxSOt`3!V7?s(y6lqR9iPE>W$WO zZT1CHGa>Vws-iJ;n5@ZW2P26vl)0*Y1;PgIg$L)O$hl?ZckWxH*<>W0;`A_wwwSZf8ImqdBjvL5~ zO_zbKZ4t18x7=0&`bmCEV)2~J6={9g9VD#`d=I)&&wcI>7z&+R)$(sJ-`me5cx7c_ z0L0i|ddaz+lwLsz;T|x6sp-^cD zBVixFLhXX3eaq6M=?}wJL%QPNdBUOtAnF+EWN*~qL zAK+zn9A4JSOg&ZsScg{0ZlxH!b=Sml>%j8D3i#Q?S73MZ5s8`uuLQDD(#ty=gwN=c zD@jmx3safC3g8`6e-@r({-4IaJRZun{aYo9M7BHGLX<7*kgO4k#x{1PWH)w643UtX zYHWjK8_Nt~8atJ;WX--WS<4<}$@V+1(f!=d^FF`#^S*!id|cOD=YE{W@;$x>&|~U+ zw~{RZXUD4-SANfDj-oiQcO$y&sGj$L+Xmu~jHwy5Z5V=rJm@6eYw1G$j3zDdq9JV~ z_;XG7Fyc;OQ%9I}KiT{$icC9ebXJW=q&nnHrs&pF3B!)(z!_ns+uL&lev+2GM!V}e zJ||#mB0{7#w(8Zn{e^e?n#;H6x4YB!FL+&r?QZ%Xl#NuB@UUV^>^+7dKHH00?>4FI zHTY||o|87;`aF2n9eF>uA^frh@|o*bJb{lK$X8|2Jy> z;v0U%U$PwcbTiH>j+{N$`wL2BbvOp_CYeUUvU_!;yfbn-!lblNEv zTIZ?+(qaJj*xLV@ z0>FRc8=~4r!ZCjM`~n;FOi~rw99X`RcNZ|BnMy4X%T5QK%HuCgRz(#w+uW4JeG5aL;$3!MH`rHSv`IF}K95;JtbNF{uKm&!&qg9t>Kjp z2pk0c+FF@yvq#$y(KlN5(`?GQayH&ykg(OrdQnnJN*M}10R-_H*BV$mQe=^a<&SgN ziES4Tg7-h3f&t3={g#AQ1SBl*3ue@iE~o8zCGDn16P--A5U+Nfl2m&$DC0%$YANPH zWu{dC#^ODtokqo(uZMaEi}qtvn`@XrB-G-0P|fA~2;v=uWBwq)dhZn_kNYeGnup}p@+&sgk__dLpo%V-mQ;UOvrhY|oP%Ou5uDIl?v zvx=8e!DU=)TQl-mAXMfxPzpu{_`^ZPoO)1J1CZvQWHIBk-&31%w>+qTBKpFleRM)H zdsoI|_Ldu%s9>Ogvw7}zm3Q3(w}36DRHT0~$+7p4G+ChQ;jha{=N^!R?`SWdW-+^Q z7_{bz9(pI`kY*WHap2hM+1tQL66lA+)wl+Ri^FNo9)l7!0cX~;Na2`~y(pHJ$eIHd 
zH_1_}v(omww8e{AZ`O`8e>^LGH$It{hqLRPI-LPE;Ke;$$~aXoFRxY7ndweosZxwo zn2pVgecn1XNSTNYT?@Mlp@i6(z$Yd!C7*zUH*^Y8SzqN=H0N$7{n!Hj-4Avy;)Ql4Lx&1(T$E(~ zEFele90-#}EF>$?jVct~X#0pL_7zn&+*}ikzJ$TBBnbpCd0VW$q8ec0F2H4qBQB?B z=I4=)4b+Frp!KwB?5#VFj+{aS5NibJK+;YSNCi6`9EN|~6;s9C*32Y#1!)2pCs5jZ zpjO0WSSnX00l6)Wm1K>nH2}dL6ksbO37jLRIzZ1336jo;U#&7t#LBJNBprty2LcEl z{U+GY>A(*L_F$VS^`Djlf~Q{n24qeMYi81h0?A|pUqCO9!cFWRq=HUjh$NUW=sp=O zxVG+k&&1IIu0<;Vs{%8E2#^N(Wq{^54Tg+wrQ}nFZ||7p+$~*GN~-TYs66p!_Q~#^ z=8!WsGP)@7gbOlxAv*DAk+o^gl{4*T$d&S?Sy^Y1oU;deW}tjFNk)-F7@==S1a~Ln z84-?!CI7v~8AcR}gXaLGh>$7KK~)uiQ3_FrbBFyEWex+?PG ze+rxck-RVHV{6ce_R9I>HxO3&BVkrlS#B9!XwRuy$vt)Sw-kkx!LU4j z(=drdY^$-&;=U*_c@OnIt0lRqAxf*?kmBd^Vh~AV8STJuN55^HoOFIf;op~+v4bbj z3_m596emQ1asbujvG0@%!9BUx%BQRRP&usaHlj7&@PU)ro>zCmxvKW->}FAdv*!rT zR(DlTW_#K=!6I2rBsL6I&cgpCu=4OP6<(hQ>AZ&=BZR=zAA>^Oiof@Z+}aw;v3P5M zuYhnd1;yddg0OP?k9&&i%QM1wffKxNu#TSK9D7p2I1VS8LGpheia@fvB-X|Mr@~b{ zRg-H;aI0r~wH4+TY$3T;hupeKH6>vA3)L_$BX0349PA%ZwH;^@q%}^#vYlelLzL3; z(QUR%6(%l000xW{Bp|?j2~OV{M%=Ub$H4bPfWuys$yr_fHWNUTNs@h}ov+%E=@JLz zk~xHXhVI4yk^j(nnt5E}O-Nt70)*8o(~l(DH}^Bu*3F;msDVe507%`#fP!I(+jrI= z{8&{2*E*g^5^14j*dm#p+@-JI5d}P#GHvuoU z{x>-oU4~r^Luh5(Hffd7Z!7#z&wVt|Nk|+XHc4bEqP6gnRsf5m+G0}!7&>FQV8HT$ z8U$2?K)DO;xXFo}pXTpI7)N(Z_zG}x<@cbSrQ&$;a3^6uG0k03>ABAOb8&Hr!rq85 zJA1>=0!fw7Xj~a$R1oPBYPRV{b!p}+&>A}IeajB$8VAS(8)RYeqR)&7(^#hCw z(#bLf51eO58$OK31I6s|J%DS-(N0tF->*Kr9#(r9#SZe4lC>iY+mV=QVaIjr)jY|L zMP$6`jA2d19m0*37G$cd&73|`m^0aP%B)zbQN3g(rc|U4%VdLk)kM@qDhba#>#ZBf zs9@bj{iO|jBmcYlsURxq3Q=j2K&BIK`0r~-<|v`sJuqnam0&m+U)p7NtNh~Twt`}u zfk~K&^@1QKiO2iHu!ofd(qqWRinnE*D%wDBq1SkKt?#xO@nKhKl|tgMu1McV)cKl2 zW%Ub#_zgs@*!tzE&j5{j1K|qTammOpJj0?XeFJ0c&xv?}3R<<$ylnbr{*=Fn7y&{PhUubJzFQD&QbEesjxtr{EPZC zwm8^cBcgc4@ssXOzuJ2#jZ1K$Vs&`_RNyVFrJki+XQ&W5e(_HbXKMiW%-#@D|+u}~JnpQELIsri+N&Ec9 zJ@}859$>yGQ0NXb;IQS?JVPnOgOb<2H_Q=O5eSdy*UE+9;z?>G(2By(%rLxjxFuId zkDO!aS2$1oW-q_k&@+-df7T_Cq)FDpq2teuxHF=^a>qoMO( zI_`icUt#WczV7-;Lm}i5XlbwP1ZYBh#@lvK(8^U8wxU29?k6L}0Qn`l?(=DM>Y>5M 
zJnr|*+7WZ__2$zzo5e-n>@8&~U{Wc4i}@ygudQQ9p-I`~{&+6xpe-67J`U@{msl(X zRQ2V*Os1RqlDlkPn$Xy6e8gXiULDUDhAtp}bEa8_y%q{f1VmYF1))CoQcD8{=4fPU0CVFl9Xc}NMl|jgTr&yY)XE=vbRvg8=GiF#^bUQN~^ zhKSp{d@h^qH29V2Q`+<%0OzD$z_O&!Ns`F6psHpH5raql;d)NUWpg1H=j(6R7MrPi zOJah0AFX6Ly?p-izxDj|E)8!hweEJ3Hw<^unsNU3_DUdJV`ShK=GltfRCXdFN zugz5AYhhPWbG3t0_E4XkPqFC}ANE&wyt(=rcd*r7dc#(yfb2@;vTXU-@=nb2Ik;DW zIViJ0Qt0Sh;AcDlcjYueNU$sCCaS>6oXGbj_M)GI_lsVcO2R`>uEsr0O{ZBQ+L0TT zbd%6`s~jh}_*4ezc-Tvy1YI%G6$JqFraCLO!5TlKqp|2{7fyA@jS$?F@l)YZ{DdRRIh$G zX(4P+haz>jv%U@_Zm-?0X+27@GBIetG~Z_66_bi?Yi(20021=YWEeXzb8x}-|90=! zV(3q-c(a#xM(aO3Z7Lu+^YsZB_}?xKI*^+@oVXt&qVjX*c_iE02X@VQe~j^qBMwxT zkRYH+9aZ=eh#n|r__t~TNJ=@xAyo!sRKHIE^h_%*sgDPnk6AMt9V~Dko+@<&Ui(#m zFM7>5ZevdGU(D|+4Ka_$wx9BQ61_$quCtdVAR-U;3uqO0MM=g)EFb)44C~CEX!O|m zU1!RJMQvXD@X)_ver>$lpK=eck&j`Qd*tfs3K*Qp$h<$Jx^jXTEXk9@?OXuwFL&od zSYYSZxFl01=J`s{2AdvV-Aap~lk8BXh`wfFGeojXr)M`MzEsjJVeipQ76tehK}U-Y$l z%l5<{5w-mx)}DG$Z#>?J5PBmRi@t+*Oe1)&&w}Xp{#RQ6*uy3(V8ME0-MlKdh<%Z* zW);b}^er#l83{hY*%=#z0t=yL^?Tj9Aq}F9O_vnroVHXEZDH*?hoK5}2Zh|nE&z*p z8o}M)eXQri?5iOM&&A>UCkYoQ{$+NCnw) zj_KfvX}KbL73@{>djbXUKD{5GcpZ~i8p%2yR~hni;M#MYDC=w*LiXS=%sB{{bKR`- zfFe}oQN!*mlzk1ZyUXxmfz-AEo})GDZ`CaL=QBOqj=1omhMvWm>3+0RHO&h)SRq)f zP}mGvzGN<>U+JsvIiRa3n;Fx400?L#k8~uD_Y7PCSt0x#sLrx#j7F}}Yo)?q0oC>FgY4BEOnF)M zW)x;pQdP-AwCxk;5l)h39aN3Un1#a*Os@=uQW&OVT%2f`nS&Jw0=Ng}%nqQg+=Hp) zZb56**yy~VfXUc~`0b~-uUqDima>hGt@92ydeJz?H%WK*w7c@N8yrGl*DGwa6c{kpG43f@T=F z+~Xk|lk5gFWCP!l48BwQlo#;VG+h81J~add2d~j%?myNzo&^dz0mIwsO#3uWpg^`HQ!G0cXYC*>1y z?~>gCkGkS{-XT(+7I?FVQGuGu?*8<8=G2G!px#S~46c4f_g)GKmT>SdI@FyMFNY(h zMcdC95QmoT$+l6_!JSl!BP-?%qs|$~24qAsZ7INqye|}dsG*qVl@l?)52(OM-G1N8 zsK7h#!IP!|XNVPEfBidoYV~<v9bMyus zwaLj$RrR<>sOOW}_cu+y<6yZ?7kdNP3>M-dP96@9e>y8h3K{;n@4`S(9{B2&S<`qJ zrJxnZ>@m|Q14CA2=dKZV9Y<99uZIbO(+g8^Hg#2kBKiyZ^oEwDUwmX}Lkk2hlcduN zV5T!()y&tjvE6BH`Ra*B3yWWw_x+lRZ*Vl0s+Fo9$*P!nJ+Ps(u*6}i{eyiXxAxDS zaXCr&t$FmFVxxMJCrGJ;F^|8x=!?DHFX=|N_UhkL(Gh)&wYGixLz^Q*_qSduxIZ<_ 
z18G53maZLz89G%`>i0{FobC)~y&@39ND1YnMN^;+-yXA7L(^ROfW6r}&eo`~9`5cT zhorP3DjKRsmrD0(cjk&Xb|YNx|mP(Yc^ljCH)t-m4rHt zW)g~C47yIpK2PHKmF}6S>PS`;q0}BEPL*6gHKlnten7Jj!yPXOg!>i4tKkFLp!VXw z?MB`)UO>)5CI<5EYVX0owXReGnUIHHh5mWC(tyo`gX$na=luP1eUyKeNC~l(b1y8k zQ06-`wp~ns@>M_2Au7@Xu3(s_hr@=BPuj*9A>ic<0>5jEiOk zmOmJz$|xSjc=wJD!H_!UHqm!?8$x$epVD$Vs7tmc;17&5NdB|)CcBcs@Em#toay!% zB2iL|>Jd}gr2-+Bc{l1uHC)Kg7ag}ph64A z=6>3dv+X#tT@cUQcfv=+yjfzyXJ>1%DxU#%THn@#?o|;*Db0PCPxR40MuqPTG#0ru znJ1u9Ru8`#k%kTB*Q>L==BJ?VQJt9z`w^ibZJL_YL$PtTEv#?Il6<(-TWkjJu(>tl zs!(34eB7}(G_Lq+-(bx}fv;@iqh0W5nu>-Fn3mn)I@X&Z@D&|`*~0;iLiPrgo%5|( zQbCP&ryo(XO9l0|)jH{pVUWAKW^dp19jZp#*+U$L*5i1Y6pbs5n?AjljZ-&6TFw6% zw*uUPA*qA8jW}#?>H{!Iy6_=-z*5*Hhue-H7giA!#kRa z#twNcfc*#C^9AoXFQA->(8vVxGwJp%Zc5M=NUJ__Z~Zo}57nU%1Rg8`&$@j8^H6 zT#H^(pZc>60ZjhM5)U40C-VABf1Ompc%>mTQ>}D$Ld6XIyefb$fsS6#ybO1fYf9*- zEPXLjBu1WJQ}yXD_^Fg(>&(!LqRCUn@Dtuu(nnycKEmFer5&zTPd~^enLStuf#rJM zTXV?-QD2Z83eq`+WF&*Nv9tOu8lN1S=7y`gYX+O8Ow2qSIa))Omr7QuZxAiU+u)yt z?Pb3^aFW}aIaR-36>HU69@ROyA-^>Mkm(><6uKvXf~4MF=dasw8{5*YUN=m<`uf`H`hKQGdrx_>G{fPFu6Sw0%2U`_FHAzue@M;$M3) zu`an{n-4YPa^Llg&B?__Wock%S&vYzwWEZeuRg;I3 zUOcI_Un5vzr2FSE#$c^ZdW@Cw^6(bAL9}ns)<8eO4j3TTty;*602CvRHP6jj%DC5){GL0{~^8_ZesiP5yo>z3mZPUB;p-Y+JyLIm27G1zt04VZa+(mpEvCqKXg2^}#u$i<> zl{=NgpF*6b+?;Qy$ciubw9atAWIZwW`oLodkXM#k0e?UATo06`f&TLI7GVcm`gN~PKU|W&0Op%a>9|f1_PX;a z8A^2AqS?_G9L1rHM@=aAx8bbMA;Qs&} CGJkFW literal 0 HcmV?d00001 diff --git a/doc/arch/cache/images/cache-span-layout.png b/doc/arch/cache/images/cache-span-layout.png new file mode 100644 index 0000000000000000000000000000000000000000..317d69af4567edabe674581c4c8e1c3c80a48969 GIT binary patch literal 8533 zcmeHtXH-*Nv@J;QO+pO9Yyz7sdhihw_a&e{sH1jT5!W#EL^N=8kFfS@db>e!qFIH!Or8afjYh?$&! z2(S4(1rBbmD9Op_x|{w+|hkfXYX>2!w`rS zBOdNI7?{%QSFhw)Z+Sm-oGx^X{q$7^6&aF&RdmY0cC}+*+%*{3tCu)%lrau0A`E9! 
z5`?oQT!jesPu6?rym!AW)pWcdv{Uqq=AX+kTeeTVyu6U&UTwpKAO+qa1Q*HXPur4y z7`F!o_Ej1O)+Q`AZGeEm`}3hfjVHSk)echwMtnpd1mGIEkCFeb|eAa(?+V={oBa~%9Yjr}@A{e{=ZvIu=ZPL&1goWv8 z(}dSIb*LSt>+?#Uy`F8Hvu^k5?0tx!WNQY8Q5DXp9dGAI*;`wK-|iMRhBKyPd%}gl zk_mwv{KO?qr?t7zwdsaTSsx-5^{BYGE8YeafoTwFX6Vh(Z)cgw@X zqYO!_9cMGhrB922myf(iejku z$*Nhe(j_Q)kJ2~iCG|ZvLw{3kbw&d7Zl$PlHN;mUN$15dQ+@(7s6l7=%2KcD04V-0 zvh`qlP-BO!=}7RSb;9fKk>5pph^LgE-70()=bMW4B~&f`?lfohSXmK{Knm{qr?|Pe z%%!*X-u+s*rUU;lyUzJBDIGhXwauP)wAm}P(`Y<@W%8>}=`Zf?abA{D-+8uXSPgW1 z%EkFP)GsSB(KZQRyLMWV3UN`6xJQ4Pt=VNq$C9@nAs`QpzC@PvT?FHNJfQN~IN^0} z&w~vAvk_>i8mT*loU;!jqXxJ*^rX4ebB)AjRGdvw>g=Fz=TL6nuT2ER^2ptL<2Ujf zf7Gm~k)B^DC0m`f_OlXGNn3}c#Al*KMPGL3tcHEla&?+?&Q9K|lPV1MHAFf%q>bEr z`(~6fS@bZGP}YwKys%wv$yv(3}FJxdt$kyMHT5l#QpJ2(h{N7q-G`RJ7 z$g2f8AWtx0`U! zV|Ap+UL|fm>uSe!)3`y6rtHSEvg?N`i{ye5?C*SK`#kJRijt~xzf@K}I0~p9hK-QZ z22nQN_8kvs8p=o$k7iB8|4u$pRenp}FZ8FJgbarq3+aIwr5$_Lz9m8sEomCS=@1lz zCn>*D-|A;pk>QSJWi7k^5Y9^+wONwa{et4E z{(V{e`1_M=t`8VuF5DWo5G}aUYes9^D^qscrt!KJM%pg(ZAW{13Pol&f zP01Wg&nS_K(@zpc?oD{mk-Y8orsKh@3Zs7D$1i)69;a`_hn$|G#CbiRhpWQv2Ter! 
zaZIl>IW8sOD5UCB=_|su8gFa4Ki>C46j1BUAusvI$)G=-@GVex>)Z~Qc_Myf= zli^2&V%M6x@JyrI${XC>u)y|`%gP>fJU=lo7;!98^?tt7sQ(9d`VAFX%|+!t&VBLpFX1C!kn{8g!Hm_zl5y5E*>7U|%X=T_8OAk7XD+{M7CWl_HM zu?a?_Bs{o%)u6Yd&cR)QZ4H4V?r`JZ8Q7}<2u+L7K^+(BuF=gQ| zptPQ#gvmimmsAcIS@D5cB0HarzLM~K9)yv6ybBchTO61)1A9NAR*jg{ae3IGnqgsd z*h*3`e-!pX=#Ez3oC#t4ZI`Q5mZ;@JTB2891$x?`X*mzC-R@R%D`%7s@8M zx#!c8sV=GN2x^l2cu@)z-Y3k^N#vP{6Bs=nP*o{iMw07_GNivKB$|qa5k_wisR!ZG z2{IYs91KCSzA9C*gac3hTMPQs*pGFb8Q7O}7g-U;z(UCX<|qB%Ygzy0NJ9{1()iBf zaC`Z%M)V14oC#8_je(W%^}^^Uqj*#dP-;aOSgao+R1yo~^JBewH62SCx>qjn&F0wf z^4i2~7TfoGK^sSwalQ;JX)j46wM_57OlaF^VY)7NV z`_>f)BN{#>HnF8D_vLE*F#2lB2CWKysw{ybIfqQy=##B31N4E8R|~(;*2Fd51EZ?2 zte0;xu>2$-F<8)PCgO}TU)iltd(bBPhmFC#f;o?@Q>G{{Q$J!x&^xdT!G-VFPy>7H#u7>$F`&S@EAwK4|+ZPi@x6 zi8krkeCeVc{n=kas*qx`12QRlDX$3iK_tob0G{BGA4-3~j9gDcJr`%#9^7nuPo)9f zv3f@aSw)QB`eCYP1F!F(q%ID0&aG->_@)zDYinZz_<>|TnM}yQ*2ipI_@E_^Xe-Bl zELLnh-ccLHZr|T= z{FUw-FTLc*(y_ep7*vY*Ohm7p5Wd>ZJZ$>%Ny%6y5G87!eE!7#ey!h2SypMF>+e0e zSSC+teIH>~Io1KVQtw9%Y+w87Z$f8H$2s59r6T(qx(Tulxu(g#eF7$=Gs(V52TzvF zz(%R7qnM*uQ_0@rJm?wd76mETe`ZO#&+`5B->lL{%l-7d!U93SWKldRnIQca$5GIx zrIsww)eis4op69!0&*pByp4dC1AvSVZafe&0ShF)o8zv@XiN6o9S82D0L8=eTVTW2 z{#pS+(3k)Bqm(X?TWfgJ&!VBq=KM6%tf2CTR~UZFkxyH5|7&#;cJF$9Tmzf3E~3r* z72&U#H~s(K`l7#ME7p{~RdDyFMLX1YJ`Pw&2&-ZgQ1@HkcP9s5@uu1O7F*EJ_v`|DoHATyP1N9M+!Sr7lRx zIA91pV+cZ6Bm?~pA1_LYd6C5!!7{l|qTdC(DxQIT_4d}cXiTS~EJ|1%hy2PEjZMFC z^K?_6WSkSRRo$j@0AiW+4~rqDa1fG&vta`eDiC3Dv|mggY*fAZw_rn?XEb5h@5eB; z5;+Qx6*Ukv99YK*axUk0c6NZND|!t@v&Y|Oq-o&Y+Yeiy`}>P;AhMPpX;SP9ao+gv zs;w|h<1X8TZ|W?rS2F>-u&G1swdaBQz8>l4s1-#EvQqAXaUw;n(9*nsO&uh5^O>cC z2}?{mR`eR)qz}Z&Lc4dx4BLN>j7nBE5PY0=nw32@4-O0nW5ftdd<8uE>W0JeVnMGc zE9U8ztwemLfH(yGnXXZ>9~NS{0>*zl>RnD%%}LbBSI-lF@aE3DN3peu547hW3Elz| z#Nw9Fs*!6b^J{TELlzE^S7KfO^>??50G=#dh>b#YAf99e1@HcVucYcyzO+fuDu4B6 zkgLzF{$a&(!`W2e3-e(M`yYdY;rPo!fQ{#Gf#O9?>8?brW~ajQJBx;{RG<%65C?tiW_Ptk7jWxeu8ur6d&G1@DW^}s zWn!vtAWP+m9}zUQ&&ms&k9yg%LoLAg;zg~CF5=NdYm^D 
z2dAtYD~vf*#%^cXCZ;{E-yFiBKXp-dheZBabrVW_KN@yTxl%w+U!R4MEghTv%t;(S zs3G+ShXccLpS%y1eGs83Y7usytM_S#9$7DZ8XruqqD{|~c@SWyn4{UR*zT`La`vlL zFAH5W{7j+e_6RiX(OOFRpM{0o4%t?I9?f5wv2KD!RjTYmgg9hg>>ZR06li;GY;7$7 zr+2--IIhv*=~Cv%`LF9f=|Znf`u8Yu-jG|73%KQ~{XFm!!kzvSBc1ww%WL17;>(Gs zwqj>kC=le(D-Y9u{&cgkKNp7}vRc`h2qq}|1=g1HowLR2+S<|)ETrWZ@{;rHZ-^Gh zSFRfYKJEU_HUj`OxMgI!W+s-B;_&gs=%SrUaTVF)(7R&@;aeP9>Os5Ds$xcA(^dr? z@9#fn)aih+pAAPopXrB>64@MvZBs94oMf+9Lf#L%ohd1JWNds`LZ$-^LHyO8S>z=y z9*$EQ`x4!rW#sq0=}E_~SoS24+lt$|{$Ykmp16H}6|MGLxjc4$m$p^bPcm6}m)Q8k z>RWdnjlww2dl+He2yFh{*G1T5p&F83i|-Dq_1~}2Z1%qMMYA%q(f7OgBt3Kdo;(&p zOYMTQ$^)IKyJYK{crhJUn&r)qezoi&F>`hPsr{C=pghgoG0%|ZuT=~zJLc#Ftl(@~ zvZAO~K$Y}l(|IJB@DxLSI6=iC_!XZV%&QLC4;lQy;s3?lP1pV#1mm$2UN^aN=WA&z z5r;!zUdqD)j^Uv~%b{RmfM46CQ3=|SeQXtFm*$;Vg4+U(Ur67U`na~0G;^GNJLM@; zH9ULk3bVG+Os*sJISnOe2$xLvhS?kDRv`{-3?B(RJ@QvmOk#riY7Lz3sow~i^_D>op58$e_Mb<<;g46>ffz%tSS0LTNynnnD%jCEd zT|9y=788v>G@SVIb1zU#{qdMo40gcth06ZGz&}Uxi7)%+DCZzUL;q|wzmc+UH4fPI>kci3PyWn1tTozfhe`lgRkb4H=xee z30;i;Ll7_6X^D1kcW4D71cdn}_Y077wS6;Tf8^yQ5U zey0-rPyFtER8}tEe6?L+mp-ift9Egk z^f4Desuwv$DspZz3Lj9lEJz@tBMq3xyX9s4o>Ji@T7h&1BSZrcq1@Vm2PE-ZziT#Z zi$7uSImx2BcVE1s%LtGBH}}TezFt22y=Kg>Lc!=6TD=7~VnBgMSq#Y_J;g>qr} zBAypT`9D`(9-!JE^@n5oGTaOtMSLQs_==ZfyJb*nu|N}{t%hLzj~?{h-#X3XM9IK7 zN?!LsOvcPVm_8yzxvZ)**fJZC48U}FfLaBh>%SrU#exlhGS1uB{91&w+TvWCLu0K( zIidaircUQ>o$mg>gFmxXd!HOypb*PBCBz@!NzwL}>So_?4z17x#s5J{yrh!;>sr(x zESMH#V`Gz#TgvCW*Yt1|@5-w6UeaLbfOlJHr#O`r;*cMJu=r#La1;mxiao$98UmcC z49qkNadqNg5&&c`;!{QXv-azdd6wtE?eB|m@Rek3vGO1#Kg8B~|8S0PvM9so31t&4 zo91D|FWNMDL;BZqX38@bmfHk``zK6I8?DXT3S++1%jny*tR5n$C~1N8`ktp(1^!r{ za@f!jG8L&jd}~qr9fK7# ztsjd~Z=rIB69m$!a>#`8$<*=5tg>vX$VW91g>#dki&vk|41n(*Pcdj}p*+kuy7y>4 z0dSM83~0dZo-eY9f``i|3kAa-*H;d<_+_`Snq8b9xRF zU9o4OngsZ)_}aXTt{(6)m~Y{0h=O9aW{2+tzUFJCtv~{BGr6`&`fKqx>`BR~r_itX zgVa(nesDU$?eikzKrst~@5!_2D{7jDkgfzZq0zPjN@}8my|4)_+ zIWhuJrbTN)YX2ts+MNJFN?sgNi2}%1v@&`GJVINsuA7H7{?BA~Is@j!z`A=Q&W?u) z*c}4ON9_J2i&mF;nXe*ps+tZP)?Q>-$WQ2y>9f^U@W7ed-6mfFg#)(L4n%B8#{L24 
zAXY(V{)e0IEq$tA1tCd#`m zz?4)HKERfOda6W-?T;iWt1V{}?Y+vXsw*5|X`qsA@aKFc?`MzjC;$0qO}4>b=eo=v z|3TL7@kTv8C)f1#%j)_kV;|?ww>Qu!Po~JUTwPuN4hCAAjmcUUDwLQdRS}!dCe)M5 zdS=o|f2to$r6Bd~aE^9k7Kp?LvPcXegB#^9EbyuiWLp)T!Y=8rgAnDsz@EIiJb@kDk7S zPR4;!XdXS@cqX-M^XIpxsB>OgrDVRBnLC!-KSe-o)%Wh43a~n+@PVdX3)SF}-Xf+B z^>~*FO=eec?f>(G=Hkvkfu&8gs#>*Uk|1)ZsuF*16z0qCIP=hryUNaXt)HvT6<$k) zzls|$+1R63d?5ueH4w0uo>GEbzyjN{>qgAm$QAK$l5#9uK8R3cfO+8!QlmgEVtaRy z5ylVC;x+c@Uj9k1VPXG;Rnot<-d^vv+;JCIl*($;qy;W z3@?c`#08f#u;gWg7ov=wHT_(5{r}Vdo6(5I0=v2ImIruFwqpr^nF`EU)$V2F>FMdY z23RV-52U<@5Kz-qSkB8pQ%ioE3=riUD0u+@%^0+40nYmuP^CtP%QDq&2fhb7V@5$L znrLDWOAWw{fYH(k*!qylS)=9Asi4GDMf;APg_DxskKFLdT_k6X0Ekxz*_mL*H`}Xg z&6_bU9WWRdrAV;jEJ z=FJ{kOLiV*t2+@lI*Z$3!sny{j88sR!6%RjopZyjP+}5g+1?u1o>PnkjoQwtSP|&z z)vMR8)p%@Wdw(KvJ~`ZJX=zEV6qrL*MhF!r=`ltB_1=g4#>6gpJ*MpY@?tY6yj*jf z<5r+1z0ysYZh)3_6@?e*;PrcCdtYX2>#PXz_hJI#oUN_N7e0II^7!X0H0({Udg0$R z+#rIpam3C6ThO=g1j-zj1V87Ls;etbTK41m;Q$FcZPVPFeaWvm@s=pgTreA0*cq4$m<9r9h zw@5A!kPSh`SExxPT%aAbDZPt7(gr7;mcC3poa2Q$YnC>WQ3NBnxHo{!aU%)xc7K8@ zM&^h2heIhZa9s>H0We(qshs>G?qk6*0Icpu}&k) za1ND}!7vzvNQ_-sVr=i@J@5Jb`+NUC@ALUQ_jBLRyo^M_>S98K%4je zO}hXtE={L{`;hqWhCoI50Zd=lD#Uql1mW%YRi^#hxrZ(#0EbAo-sdsB%&6_{CNfw{FTOr;H)1n)@dm?^rhgWoUT7 zUU#+$c*`tTG3!degYUcNnpPi7D|qFu=Ip~TVS5Hq(T8_eyLBe4A4^B$QA-142hsD6 zmu=&!tE*+hcP=(vs46s1d$8pXc03ex3Tb(s>1otwhrH~fsvpwdn-Bha0}(`N+2g2W z*946@_|w+6ms?^*&+{CN{=q273Q)EPz&5S0^mx zkd_yXjg9O4$4}S}C*Z9HCMOBXD}VCHNR}SKBCq)L}{t>ZH@1 z8ql+BM&Xaq;PJiPZ2_sv_rFcPrD{gwt=hkTuW>Ih2-sYiXk2dIj#tdR9JIh{2aH;* z8{bn~1!nLI2oN=*@eBGA2E>vQLnEU+%$$;l{`lE1U%uE@IBtD??m`h-8&6X-qtcX^ zo;(hAc8uVzsi~>?%9cHrN3!)60};xZC9eOm!c0o&fAg*`D@)vgS7E39bE zQqoUPtPkrJrUfkWGO&~ z)D^x-v^+-NbyLwjQb^|JozEv42}hB+u@_nJlpRU$wzPBG)(tpY8;;Fn_ffMbefvd4EFqSS2U^4^=^=N#%+^JF2zcZPrc5QF-OJro^ zOn06@9rYPe|9LPBF`W5S9)B98z)t+rCSq#*`n*{UMII2KkwD$F=3rbNZ51~Z-pPNP zf1LBIOrX6@x0;8P8udvnhhI81^p?(Ln6?)MXh;l8eHY zX|};R&_HDr&C~<#smUyIP=dKD+QAChKgPo=1vsDoj7+}9@%Hy$etd-I-Me=bK8hIH 
ztvgp6|Ab;iu?zVFic>m?ta&V*`ZNUKfLGVPd*xwm5~q{2mO{RFD4OM7Ev-fSn+`Z} zGWx#v!um%1TShmx>r0;9{CcnEjHOVwp+%0(y;j=(-uAlg{K#7^?7(AkF@Y`<_E?dAdkYBH9H3r<9Oz+h&y`gU`#am%*tB-ni!bshe8y>xncFP* z$J(19lv0s{MTeCv&aDkWaD+>jE&+@)5&PtVj!vaWarD|BM+C0!@2o- zlIT4+|H5(YBD?AoR(`mE;3OX8oRzH;?yOf@!b=O_KW(qRG2gJbZ*6&~ce0Ni)~)^H zENo(ZPT5g-mub(NP)g>;?NWu_*hHZA1JOE)P%Iy?_pq>BxxZ3#j?O#yU*8SzM z*S(fg(7yZL^Cppc)Xo#gxT?UC#kLv23C{XF)%%oMv`IVFR1d|7(qy_d)`BNK+ ( znf-KDLa(woSY`}u5^sRG%H)p0zafBSkcagU?iRuV|D@?Z^-oQI)g^zx zGbAU#u~ez1hnF0EbPlir03@d1KZz}tir%R99u-FRD*PM0Qx?K+i%L(I6~e`AD6rQ- zb`WvC9|N)?Izy-g0Mi4YmGZ%=L>$V-rxQVzqK5ANHUtiu!7&!QR1Y=-fY>bfZd$o` z;Lzo#Muagz-o^Jb8J!wL;lIVQ$6v5(FjY_w&q$*_R82E@#N_6YzdKX#>Roaf2@l7_ zq&iJKzR%+#;bYl5W1P0Sq`$?Jz{R@2w*J?9-QbkA9zKbBU5wq&bU&UzhV2Tx9of}d z9yoA@c`N zp-|~k*?udz+4{>--(in`o1D%z@me$76LHdc81xZk=@I6>J|mENTasmOfE+|hPQ9Rd zsr@AkKi+zmrzac_P9L>OGx65bldDP8#K)$^@DIOwe&Ew@ly4!>R^?fn!Dfi>d1x#G zS=-^=dOAdD6q!Y5 zNW3PqkNgQA=l+~~lFBcGsO{k3DK`O+?>))nE|G%xWE-3l*=o9YIWyt0jaZDEhjrjk_4(5go_ap7+xUCC_mW4A=n*sWt@~(^|{zZM3arW9a z`-D=zsB+J`!Cnhv*jUQIep0O-+$IgHZ7%UR-FAMJdv9 z-1=2bkbAp%T2emvZKz1~#%FI#^W9+%!R^ec6&?ZDg74_}q&;`R+6x>HS-(2RR8 z0Ga(6X_<|=gj8v1{@mt!&r7%qjYR5}2EWQku8JI3ddX=n_f7F{*cvMYOIN!gfCEG` zXRfR&IubAcaj^bGT-E2*LQi_|MMy_9b|58gVG=hzQREhxNi#w~-LVMMylcC_DLJMQ z%ak`DO9Z}p+&_et2&=y#sFFISK_C#Cl6@uMuDAEiMAwjz z+d8i^-ZPoZ$w_yXH@OK6Q%XGtW)?w7u=1A!o}rm@caPkgIg)*c7S?y$Z^h{20u6CHlr_>0%74Ri2e(3ELf8v+IHp@mBg=Y~YT|vd2 za8fbtgXy}VDJmd%%vd^2kETg0qqfNAoW%l3!36vuE3@BXdN!it?6uIZ?Gb3Uy$dCv z_VXG`-xVdvI8nnh){NDAAb-+K(*#<-|sG8t%BP|5F5U4&!o5jw(ALJu&d&@Aog{UkUI+#fO#TVJ>PjkCQCP)8dTmo&9wFXS504)| z4qd(+F#QB)ijlz?V`4JfGo&Dgdf82F!v!60k%W#`4q>Y`p)j=FkHPIZ5?8UYu_1Fc z6bqrx~@cx`^L59Gso0lbu~%F&UmSY8$rcF9~3-;Yd7}^#+is?$@0qn&p*% ruix3-n#r5Et)?Hk_sMy6xn-YcD^|N_ie`TB+l~unV5(nn!!_bxO6%2) literal 0 HcmV?d00001 diff --git a/doc/arch/cache/images/cache-stripe-layout.png b/doc/arch/cache/images/cache-stripe-layout.png new file mode 100644 index 
0000000000000000000000000000000000000000..73b41404aebec955545803ba0c36e8c1f48781ca GIT binary patch literal 11594 zcmb7qcUV)~@-{_55R_&p(xpfVNEc9gM?j>rL8KExKzgqVQbZsWY0`^;L4hPxQBgWb z2Ms8o^d=%j`fmsIoO|wbzx#am4<*@Id)BO3Gw-}JYsK8uRi`9pA}1gqpw!e*xkW%g zhy*_$oFM}LAB17Vz(0hZx73vgD&I3Nf?rNKD(WZ_5L6|cJ+M9vekZ%DVdhCdAZ3aF zN5~ka27VBn)KpP4^t-+KDGq<;R;(Jr)6e$W2+j6gJp@YfF3KuO&ocTR>EUj?mG z&ekyS+zE%=q^r`ui)@XtdAR!Fq1ostCk;w;;DWBtA&scT3t#=Fq^UI;Q@+A((dE}% zAD4sQ`!|;^NgK^?dW``lM)GX&EAxg%zI|RM!An4?o)My<__9{?)|R)SR`*Q=pH_Nn zYwPmZ^TK>I*+^Q!{fyjPD>E}iWf*NaRF$j{=L`k^ey_Vb-QwQ;`DI{kz!G2OmQ$6{ zQ!HKU*NJ-1@!*n@5@Qn+5dcPrA8Bf3m79?fUL~lWz}kAPm<1fi9Y-X65sJ}SM?=#*m{MyeJ zAz$W95+r1QxfqI@`aT}tF)_hhe#SqQ&R`}X^x!Ko>A5^=j^p=8&YiVS-JZvD`JsKM zMVt?Xza2^We)6Vg(n9p~td!WwfOC}N$Y$&PzH|PT3#02;-1+3o#>BCJD3xM+uFq$C zRikLfX@7gQH)v);IjPU}9tWbgudh&_2p4nxaC6A%GDb15gb;VjIpR@X`^b(I2||CE zGqwMf)0o;uaQ~MtoII4IaQM*f`GJz!vYRNJGs%eYk3>|58QML*c>g&J;XZ7$bk2eq zlb%HMxCG^zk8@_NzTjqNOJ<=1k1K>2z^AkdAY!+skU0LdnsmMEyVcGWgmY^~2nj73 zO!^$LNg)oYA6F)PxASQ$1zVjs!uzZaK$)xpE!llN@)T<~Y~xIBk#X`H+N}b3ip}Jr z#iU0On|R_lQxnfI?8)k*o(+G$u!zGk$?tA#i0;P0iE5-G(uUC^9EO0767CBs=Cfxk zi-lB>b#BQ%mSGT5p6P#lxC5FuuC2enz5jVLs`w*9y)wVq$8LOuX308lc8Ih#c`z6V z&AA>bww@ET*@>OiBR^aWyj18zn4Z^fBMyE!M zEmH=JGqPK~yD_RGJW)JFl;h$F(`}LwY}u9}KBI!7NaAH{WdzjHorZ(fa<}Hm21l^k zT)Q}_6C43Fq)+3<;fusD!pcuXYp}6}@PpWov(G28TU-1F)`?6@K$9#f4W+g6oiQCM z+S_;*#khHag!?f-M3>&BJwW zU0`ww&(dBqdWKZ$exg>#rv4g*3jidr>d42k=lggE{r7j&O?s!$7 zy)bm@SXyA-^!1(lRp?Z++t;kiQ5{6(D8i>$qV;*jKza0oZsXmC*399yPaieZAK$N~k5;eB%kJt_u)WyWa{3e5)SWHb#^Uk9^cn)3|VvOj&u8!}~>j_?8 ze@(pUqJ>)7AD8WWA}e+MItw!~$8$Yt8n3Bc9Q=7c^_Ne+ldbR@T$|^fVx1NDu{?w_ z8Z!b2)-$Sg7UA~2i<|**JH@%cX{9F#|`{?42Q#oUvAPcDHp;jfV;Scki!6OTXrz6#qXZZ zzp?%!_n0}}VX+e@$06C5j}XXH1tX0z_g8i-Wu zXE1Gx=#LD~!cc?VQnlG!2qRe%C5!~~TeyIp8#=X~FRZn3qt-p6@o~1!Epd9AE~iJ| zZc+ubym^7OVRZi40L5v!nOYB}4d@5Ny`3reK9Rx^KV7O>TGNH()qHqWuA3WIewX6Z z_a3pn8$Yl5@w_a5Io{l~DpJW>Yw9tk(P&qF7~B3Rpr&IKru~_MJkfV>pXfuDXnieN 
zi-(QYp}o6X?aK;sJ*G#^#n1{mJwR7tP-2cyeMYfJN?5udt!M4HQQ*XToDi-8;4)0l zW~>U-TyI+oOldo9{(8)~A&66Sq0^;Cc|x$77$y*C(kLL6+?Z8ocF<=3SM;bZxBS2h3rU#zwa63@^KC-i|j{ zkK4`&CEJL+a*N|i^^qFqLw9!Tct>ZcVjJD!oNuL5F%G8j1{(ASiU+ptSO%PLOxo|* zOT%i2+||+m1|@GB2=bN`(utLcx5sVxXL4%-M)BcyAP`8$J*OLz4$2p|gp!E|jc)@U zKj_YSX#m?hQ32nzhZ>0)hjJgkzwn^hp3I)&tqzDxSiWtq4Co-L`km9QNdAnoj4KrD!;BCQrxSa~H zLAGcVRcB@=fRSMM*5`AI7g9_Qt56I&FPSiG9BIpL2%w62ame=Z*(6-SbprZ#Pq*%8 zCtr1i7Y73>yCPKdLKatDKQ{V)Gtj%Dk+paS(v!&Ew21EsH-1Gbz8tA?wRb@yWLgwed}wG4@((iw%<3gv;xS zTxD`kU?ueB=TR*ldH@^keN`k8gqqQ-ab|C^=a*GdL2& zKO=g3v=NV^J19QtA(o$!_)WNW;V8i`&J(c({` zJPXp@W)z1HLlxZP9Er(Fc5s4l)8b?yM*ynp!05UJy#qnaX0{HJH>c+Gva++VJ{|+iM|IZw^MHv1)-EigL*GMsXEaw!#y|^y-n+$E?D3sVl?!M7 z7C~jfzH$2zWC{nJ%Hgf=yW{Hb|5h;{H6fh%1S#HGlOg{x)cn|3RF1Y3)z**Z)_7@m zLj~$6!XF=LH?*Kp1l4ESg~rH2v0+Wv=(4i1mDY<*P%4~_(~v|>4ahlam?J~L>%exCZ< zL;q(0`@NIwpq=>A(`N7s7k3tR+oHAE)*<2H{?d23-DW7~RT84k7MBM~Kxdj}hOWcG1iT+yxPR;c@&>IF-tgmUuYDT5?`JXL>7@f#Dqk1+zPI9c96u|`c1;{RR&2u%C zroJ*td+#J*Y5CaUQzq18el$NAh#1(zBKZ#Tyo7Y}?!6m@3xGlDqM93b$YITa*rn|? 
zUd?o<(9LQ>lxCsNzz;50l{vCq7&6|TbrKmKzd@(fV&ajj80zmXuc!xvTd~VTrpz3l zGeZpQ+e*;q$-=syHIclt1X6_*_lv>x@yd=351yj1|wh%?U1Es z$_K9^6ArF93BLo22h=>5r1A^5itTv*kj-JX%l`J~I94eGSGL9FrKCW9(ChuScAf77 zx1O5EZYLxEg{1zpET=Ankh59#nT~vbkeK6fq|C-2d%K54@_38684_e~x z*s_yZTvCN9?PD8MU%16FfI|5+Y3ec)%vEwRs&}WD_?)RT!QTWC67DR~WKqOpralzj z&K0L`PBBS1Q{!q!pfh`49Pu;- zS9iEQo*R1PALK{$`S4j~C8SaB%TyK@P0HN57Rv*lND*idOn{MCkf zs4txs(g8l%gkfXj!!>;T?vqqPXok|HJK1iD6sixM6&$$1dPU1zJ1Qa5fAL|@)UDEM zq2ZfVH8mNe#t{}jqQT;>6s05`H_(i7h~7d_x3(#CfRlz-Afi{HZ=9Ez4$to2eOuO^ z(W=vz)ut<6{Z-oVV|&SZ;YI?b9H1x)XB9cPZ8BtB%aDjAe;D4evVMlr0&%GldeC0V zVRH+am;RBPkEvYb$z1fZRYUvU*)s_H5)*}x-b+#EE%5tB;#BdP_42E?m&EU4$g7IE zulBvrwOB;Y#B4b#6;sXjz6U)eRLC1 z92hmQL%FCvX-=|0)OYJ2#-|My~UvIO8p zVna5PbMumvpA!$rw-utaimp)LYVFC2p;q?W+SbFvL)fSJO2Ua#uxky=LgF5VTQL4rMa+deN_$w9 zkb;kz0^6!DUIk^jrfT3CzCM!e@6f+PaevU`UD_ynCN_nf1F`y{>Yf}N?McT{0jvqm4G4N(a!2OvJuFsZw5Iu~C|GG9(3#pdFnijccboX-JvL zj4^Dt%p?8?&c-fWx#A;z1k@l}xBxd8f zroRrvx$#;~?;wbjgWwl`|;lZ>zsu~f~@EwttL7v@7u(;@A ze*7!AQn2|ER_})gSpT`fH7a8qf}PI#Z6j$Y-nW0;3ew0MxPe35yzbREZM9)H3(Im~5=M zOSBK{sYqikx#|XM1O5o<5sk5QQsyMmq|Z$=3PW;{PdeiYo*c-Nh~i>{gTS4VLhAv$ zXl_+C302fPoAg$H{^W8by0uzcaytI|=)Of1k@B$Z+V}=0wIPzJQ!tZVhEx8C6I{az5150Ij zqn>vvJ`b;I+&GY6?uT5zl7I8M1A;?|x^vv_|6Oj}27YawAZ0hSI0SRUVZ~D!xp`YQq+RM=|;!K zY{%j+;!EQsJwRnWd#PN`%)V*U0>A?x`%`GZ{Nkbd+kDWo!#@l%6M37&qt)aTe6}N5L}In_NMm}bWHl&q3{bZ00bqGTfHva%Rd=Lg zqo;PFz|}2BLs5$22)+xV3c#C_7#IVwV;yaD*-->c0~A#txAfD+faoD(6Zi`~C^UdK zIf9@od;f+5ihhWq00D}`$rRLhj2g7G6OM`WOGH?wH*w&bJBR^w1-4U6Y2|WO_7)%@ zY@*$|8ygO&EVSLi2VC(mFTT70veaL1j{_%fn<4BgYnb61_j)29GiJi?r+uz=S)tJV ztz#)3-@qbxiQAUbSvpQ1Cdiegrp}3o8SHS=oTz?wnqm}^BwuRYuHeOV66GlO#FwYd z{*5||_Q+jyhFe=tZ_IX|%#IhE9^DLmTy`^7HYx@^%gPRs)R@PFy#XuBqmzh-;Uhj1 z+xEsO80e%6xheCQr(e;g0*%qfb%o}E1|9mf!irj#r>L#1Esz+B3mUCvq*SqE(If-t zV#AIo9H(gTI)12lDHQj0AXcdb*`FfQ{GcQixn1c;seS~kGvr{&X9d;zG+m-7_p#9j z(j(ybM$i*OoL+`2F<-1iMCOmsJ)dSc%DRN78R_WMU(0>0hDjx}kWhv4(4G)Gupo`I zMyGxg>KCEuUw+ z;;CQ=`jVnDXiG*g*PSMK&y6lc$Qb}rKHaB6+>gSH$NtHjZ~QI 
ztd6T67qD=$y_5SnMQpyRHv2Ay*2^hO?Nn1(SV(y2VZX_7$o}NPVOQuoz5Kn#zQb`+ z)}OLRaVdLsp2w3U^6eBEC;sTmSB#pSwZ~aqm}=f%Q@#+M!}xsK)WRZL=pESMH#4N( zfFFb&=bF5Fwb44{c4*m|8@iKwZcYBme(Jqrtx)H~-Hn%4KS#Pk+a5?!?bXemn6mKU zG<%h%6Dg2KyBdmmxwUcp`tj^!pe--`{m~oaOSW#Sg$We#;dUo+%2GU!D|l#}$EM?~ z4U^xrrYuAC{F1V12B0r-J5m>t2#p$N9KIU8A-1)SGymQ=4JH%y#Tx8YXOZ7T&<`>2 z8LpL$4@DU*H%zJu)S>7#H^#{2xX-C~*z!Dx3LQt!Zv4E}pyY<(E`*2$O@X8q8l?x^ zzjortDIv~r#_F<xW#mA!7h-ujv#|LEgIu!&c5JeCP7VbuhF8H|3>a}uJz5&(eA zyj26&^^6AeojA}fH4qoZkHyTzyvHtqjB?||pH;ya29E&Zk_I6hePwBumRcrYo&+n2 zkH_#{af5_%=!T)kN&It`qX_U4pW+%g-j_88?QXQR5RIJ-PsH(Kj~&qtOO@&%Y$83& z<4_!3eqb=+ldzf&@b2K?psR~~4AjFD)Z+!VYvmZ@kXd%tNjt~jW$5vd?_}^}crh*b zn-gvcavII8w>%(1^+XwSLBYA=!%^-)W?7?9e=P=!l>}=iJQ^|o(nd8Lt;+TFD`@Tc ze^r})$e5-Y1S9-IybiH2xW9i!U!^b>dD&Lw-=wN`c;lV>EZ9r|9lF32$LaJbPvSl~ zh;UHQI;MdA_OZEG?{FkWl{6*@F8ljA-fAs4trHLVr1TG&qqVABCvXm)&XDJUMI4+&~&$e*6rhmqc=WzXEZ}2 z`pQ@xDBfkcHEUK3g%8x%%B+;_Km6nS-SD{d zoSfU{=15!ijrW}FrB^{k!O1I5T}eCL;PKxK0G#D`N9*$}y z<7BElhu+iPH6-}S7vT)g#s_X{5tP+cMnYn1Z~1w9wyJ`zJ9|ZnkMnZZTfpZ@Sot;3 zeCr0MMn=DXQ)s=uVo&GzKmBz@j6*P zxnL#ZB_)NWd4@`DgjNHAP3H502-Ss)}WM{)mEWAW4i>=|5=4^%^A~*Pks+}q>P6-lu#1fkoH`S3|yk?8B-}H`B9?iH8upN>bd`RDFtHrL# zc{=Qre3Hk&;x_Fus)M-T%%Rc63}{?wr?s7M5>~gD!BT_^1x>KPa4gRV2?!02mO#&2 zO(&+(WuU!h*BvrB(HPi|>23>zT#z5H6lf>fN|e>By|&=T@>uTO+k_$BuUP9>!{boxn9zJfH*!^3$Sk*k4my z31W>U7x{?WD|HJME-koKi$ZMuyF0+x=K)tfi9Paj1>UjoK^n47R#*O z(0uePJg$DTA3RWrp+!MAbVvEPs%!fyamc10OQc?BI^7=@7~J|m!9Dmf-giaNvx$$M zRT7be>lb=_8caPE0>h(GZ|%W2N;J}{b6dWVS*u~D312oIOiQAd^8NNXz8RWSyP{~L z23*`m*kdBspfU&A4fT@k7>uVQO(#h!R0p_?Lvo63$xh0pn)i%BuBY8gJ@ntoeTHp{ zd@urG(poON2^bg{80{b!pOsxptT`o#AiRjVb=6U1TR?@t){F|nEa_5gYHwe~ArL2F zLz9RDFH@o5v1O;1(Ln zfgn~%1BU-X7on{g2x1TI!z-CVKmrjC1Sot2-6tD^uzhs5iLe2SE;EERA-Vk`^5DZn zODEFD{3M69puLA9uv2Z6r@a1wte_+iT~4446mKaF!fJFEX-F)AylT21BL2cCq>OUa z!%P8Q(%hf~UQ8%>9l4DH#-3YQnVNoR4wluK3AGB|G6JFaZ?yg|GH)W{Y4MSygm@_| z1o2z>P^J9foMm%R#ym|*eGWEJ?oJsVo`0iWBqH^7i_L#rF&;CL{vsIyE90Dq@4@B5 
z7Io~T%KdbMq!+kKz`jFfE3%7NKfIQ8ykV=VRs(S8AP#-d@!E)pp}PgE1;YBt+QIw) zu3di@G&KlDc<`!VTBz|~EPmY2z=YQphxR`B>0Ui}bhzJ1FaDkW)z4NI-w6>}*}!o` zZSe8_q@xs6NT_)m2e)txd#+G`5%c=Us!D8*RXV0#bQ9KjyRZEaA zyP^X)d>CaFbL=zQNbxoQVs+%v7T<#l=8@(dppevYW_8cZNled1{@l9+IOr;xMA?0S zsC5+DzgN4Vt0pe?Je|6vvp)8Lh{b2hv6dHpHuGdd8hZLcN4p(lv0y^GTnH6U88yhq zragWve7mUw$LScn3b&H=Sz&OPY*0Nam{D~tFcbx;S56lJY}J2Yv3o2{)=+#9S;{Tf z@IbG?VG!N=bq4YNOa6u$uzmksf?nT?+MYAJ2Aqf|*_yg*B7@bJboqdc@RIX!27$Lt zYk0TU`Q&&tZet5HM}am2hYx(#YMr@x+`V2|H5$g5*rlSs^ECNB_8g0hxV*fcR>2+B zR!6xX8CFLdkJAwunVALgS(_@(wtDvLCet1LQ#*(xELN`T(p;yJh9D%c?12Ljy&17@j|!XT9dPHc?N?7Ib&4((+r=k8|~l z2fIItB*9tX0_0}Q22KKxoVqi+tSc=q?PGr~B%awpNFy?CqjUgP|4&|b6-_jUMHy^k znp8=nVUrLE5?FUPu+2U&1UZB|g1tW${7nn~F^Hmi92oDV5fn=AW$sSB2JckVf~Ood zdtcd(HT2_-?Dgzl;JR#dX5TPsN95V()2H6W**tqrVD+NS9RAdeCi%xxeRFWkG(Y(- zwl@1$iDM$`8n+BGec$7wLyu>C$}o6H$Pwu2FAUFtAOvC_K2V7)rB4umdXz~E*oadZ z;$|Z{!Ka%DqkG^{YThHX>P@Vj{34Lp zeD~e}1 z{A_hv1SVv^DCd8{nJ)OBYNoQ#O@QQoAddLi`rFZ%mNEOeA2ik{X`xEc*mzrXW5Z?x zZ$MD*TuzXq8UiCX)&A5h*N4+CBD(&7(tRvNf9fTv;bDS*W3!vOz+%TxZf0_jPgV&F zeCgB7$hok)I$#dVXRdUPLlsa^Z(-60>coI z4;CO9`P;{6M$)ta65nYi9^b?>E7xhBo11ZqbSofyZB5SqRb}8wHi*y^SLpw-OZUMu z=FVj|dAZa%)QaMlx1A$|hzU-;7}`LFY}qG-f4M4n%&uf5$~F&e;MH-4WXEM-_dcWZ#zuI7BF?W;&D0 zQRLvMfacTXwQ*$7;j&dOiVgQuw#&Eq2TPN|pSo8(4V#h%_HSSRX|@<6beMj1AR?XN zgJ8M+cMBwIW&9Qne-{LQITLpB<6rHR3M@~2h%?;Vd#i5tqhs!qoSeB#@Rn>l4Ke;w ziXo7!<~B4sF)`tj+uCZa!}S+*!Y4k!J}yu7H`#|_)#}czC+%CEx)I#fA@fJwcyJZ0CSyM?8*5rUe|?(SYu=3 zE3wupYp;$&45>f&eUg8I4WBJQq8dsXONAs`nX;B8`);hIBugZTjD24cWekQEyX=vS zCHsVzG?T$lzdP#v{=C1R<&WRvftlBR-Fwct=RD8zoY#GcGQ5c7WD{b8Ac#{}=j^tbe$l^1Y2tMdMFCouBWnFt|7mN<4FPw&;ia7So8%*Gu)lJ9T6M}ZWqy5sg zxE9z#&^|`pv!^fnSuH(Z^5r#2Z}G1QdHa*KxNexA>Nx&1p8H%|Y`n7;Z@B;D5uy7+ zyc`0*5b{Nd8nx2jt1^sP2&^1A91+Y{wY&RgpVldJz zF)=Zm930B3szbviR`nYasbysfDt!Y3Ro)9%p82dU4hJE+eVL&F=0cD0m8lk82dQHo zgiNh_V`$1~H-ft8$f|G+M;;4Y?yW+K`rEF5pIKU3LdJ>SWuzkX$Og}l*^UB%Akqib zp|Wcyjvfsc;tu6ayiPIle(g`PmmNg>+9Wm1&weyT?Put_pd4}-(!}W$QCsz{UAxw> 
zwd7h;T-2D43C1}MmDLAQzTabna_*nSxmsIWQ{d5LMbzw*q61#+o8R7Xb$54@T9~1n z@adZ6i8m3n?Hr3pJMM1%E|PO#0trDn2v*N=r}BDxv!I9Lo3pC*_4VT+zfNiFgf!pk zCQucwVta;$d}rGE6r?Kms(a0^&tdNUgmhS<+{ZUJgM57{P0{?MT}MuwpiV?^r6bVK z#JC~xHmPQ`jg8GcF0onB2lqYay1c!-3?syNAn^*Rcj(LHQV@pjke?(~qe}Ow=EA~* zbxZ7;eY%lU9n}*)i&#;H;2*`PB|;6UFIRuF6@uEN1YuzTf#cp^d4~*gg0_AP$z3T~ zn-crwN1DA5>;{E0CU8(*RaFJ84m+rxMFdU8YCn83v}VY9b5_J4sP%-Sl~cu)Y>jIV z7Kew2*MCx%6B*av&_TO(=n1MC8sh^4%5ye#cD*7IV#6Qen3rz~R$K1RVC^E@&9J*w zy0XU?!D%Lm@L3+)Y8G}4`nf*0xd!krMF*#*q}+0KotwOI7#S5m;g{QNXJ;P$gSidu z`>nLgYRqd~oe7q+@0!De{Orlps<{demRfFd(?PnDba01#<>=_>INNzk#Isou!Ox%6 z5o5RlZ-Q{fzTRi>qJzN=;TFrLq))f;6>nPtR+{*^L{F|Y@uLh=#r8qsx1<`;>p}RM zniM$bl$A8HzH20nc+K~_-tFG3!E3c}o##+hbrZ2}%(42WFWrs(uN5#&8TGmMHrfn= zJ*HcC;pVnxQ(f02=%IvjN5>Y#DIdDJJSb-IIaf96$G(!1jSb-r?Zntl+}NQ@<%)gLD+y+{9MM{RRzNt0UEtGXNjOS4y9y zq)1S%)p*<$6NJRW0QPLtU>Fuwz3s-O{YI(#BE`nqBILLvrf?aWK?il94i!mK4BVRJ=gVoTKu+T#8rGyO>yzURGfyrw%FJg zCUK%vC(ZNa<;$HZO5?)Q?MZUvjOB)32)T_itNQjhW+>il_beiuO!;IJR}ai2oHv>6 zH_#Zxx@XJb^^+BP#Wi>#O>5nB>JHotq0*uI%e#AEJC^nZvMXIDmYqMn_(G`hEL)rZ znJmUg%cVXvV-)c&zm-|7k4q99Qb-6S_aZTVlTikU^*bfW9W&wxsPF8hQDl7P&u{lM z$P|jkj{##PC8Z!`<;u!R#fD7}2Zy-|n-;SgTcVK`ANOlScp3(S768Y(PpxC>+KA9HF#pU?7lw!L0fWBD-|94Xa zCO_~-f{AsW5Kol#~uRE%Rr%n9C!UoJR?vy zu+L2l%GeV|4ZYRe-27GZ*0a>qbtR@ERszd;5`cR@ifuWRUMHmP_Yec6FpSy>QRw;` z(T)=crABn4vKvMS+s)x7c9@fp&kQRufR+DkS`$I_k@<9d%7jgmnnso1N{|28K?V!| zFutwJ>L?92jNk;~`m;%R>WrnKiyba5PrKwrVH15-R^nbfoI%w2ABB?xeL5r6$e+|1E;r32-svp{9OM03^=K zd3=Aq0g2Pwn~7hdK)s2;a&o?ETHf$N-&KlTuWt>b4glkI8mI!Dp)aiVhHhUL@KM6np#Kum zX9Ux)5x$#RvUcxgsTA2)J_m2&@dj^c67s2e`T5e)(nO(=*DfPC?}d5Wgqyc+WnPGg zhyayWSq;-oI(7O-;VFtq-Z^y*fA-6XmuunhhA^jnbN@1~$BZGV=8p8?;3%~2D9i5$@ zmm!I>K8g#MjqjM5FVAb$PhjFpnn2Ji-9ipYFGuur zc6B8z7PRLzJe!@Jef?Tx)x5*n8&kr$`|cVK+f9bb`%06_cO`n!Z(o$VQ2c(_IlaC^ zO%w4m7I?I?GhC5aq8ZI7+7;G>BEZRtYz0ZmWA+inBu2yz6DJw zQ8Xo|&1!#6^XBGB3|)KZ!*d3br`D&BZ_M4ZMGj^w2*#gmm0mAwM8EBDjJ*%-sI%B9^XoAQ`j11e zSj`vEyMKAhv`VL2beGsBH5~KsVf>=Kz5Uul?%zz9(}VUhZYEbxP(`Ot_zYdi3htdl 
zAn+NV+r3bNEXER2wb`I^KkjzD%(1hc06;Mq)BobQlksqlTb zu44R0+8~;dwQOEq-sG;=R&nteVO{6*ssj@TY*ePvj%?|>C|-es=S|J&eJWb)NA!8I z24+uBGr(sy1qiHHYNr(?rZ9_Np5K?5MIa~HCxSEWx(uXffmuX^*JmNI8*;xsYBs3u z9rd#*3qAC>**&l;SMFMkRkB4ERj<>xWt8(RW3l=7$+0D z&ucE4B{QJD+yDI?Y}`%^dckxh`5=2b_phM9msF{jWz;){-7fuIw^KFlytrRqaBOzV z1xZIs)Y`ZVTwIyzpgo)jhllB2hUGSn=FUkuu8+<+bGz6Ptv1q?d@E~1Ym2b=Nt#{1 zvgv+T%4`pe^nq@`2P2Q?EE(vn-gIUbUiE^HyGCnW4d^Z6f|VFSJvA<*GV&Wm$8?bH zs;OKWI+k|0D-}U8Kee#)Q+4^RKlP5Uw)^Pr;<>ej%)2C3A&(d5Mioc`3w_M4LL(EX z_rWq1VswA1tTi0XnZT9}{WgKefzqUjcEdj~}(?5_be&MBM?wpWmbvBgePsl0i@6;^MThCNFK2cjou#VlsD4&z9milm)@Y zTKO`rVUD0LNqzntbk{bMPR`C(UpexX)$jOSmI_!YyK)*pOIJ}*;jEI{JxBaLf=8bS z)Ie{OyOpALy^J4#*=QSt6*WM z?gU;SjGh~^X+&4hHp?7Joup|h;v|5lH;CTw)CAtlxCrW;@u*{hb}$l>p&WX*;9z`y zzJsIV18+KrE|i)OW(LK^#C-ht5j@t$GtyehyNzhwaJI(RuU|JepYsqqfo7lpeN?=I5Skhh*dmd7*&m?u7VEQeM!x>(j376 z8LDIwQ~@9hHsCt(us67#|ZmNGkV?A zM+q(OAg`{1wjrKnyELufcmddspU4?nSx|t_LF8lZ(u-DR8+3wMJY4i@eoUi#y`E3rtV$T(17t} zTHHhm12<|oZqT3VWzm{`X{e{F%_*?p=d zPHgTux8ZPyrG(#9j4J4%!BjF|uZH$!=g!mmPEKg_R_!vrgKDItqLMdbo2PyAApRyt z!Q`ps*4EZ=Ut99So(ASRp71RyWqG*rp6{6qClOi4s?B4@TLm*z^`j{qr#^0LJA}et z?P#eaGES9pZ~mk-pen4HmhXh(v%bNSGx9DLDr=QmwweguV_pZG)Ka`lv!fkr;`H9; zLRQ@VzvY;?W6a1$ z^pb*Uj|K*u&n@cs!k=Od4sDoOU)msgJSp?`${a^u>TQuR*?2>%f5*HWSFY@^oC9<(FausZ2I13kX-3in$3>wX4w|R{R#a5jDP-*@lN#D|1r)E6z_Txj7!UP z61mohMr?a99D?QYf#Mhcq1X+TqAjL>`7fusVYp}=d49$vN)n1(an|II2k0p4+g&)D ztT##NEe+UYqFez-9 zaU}oy7LJbc=gr$3x%%%GftIBeD6su25bt3sIMG}9xOM?a0q&s#`V(T$+Xa(aF=JD2Qa@!ENMK15ee zKT|2C%K^+w-(1(X9#y4`a=rUi`+|Dq7v)DxL#+ho1BtNjd~%F1i3v7lfsNn4puSBG z&d$!(uo{+?w|pvFQ-1#E>e=9?;f1{i;U_42aiW6CfVc&Y7j^n0L~-J2)&8E^3JTd z-AT06(GCRZv`$Yc1X0`_MGa31p5aG7E}&)Fq0jmnf9ZXsLqM_`ZU7h=?Q_RmCRfbM zZ~d(=%LA30OGDEB-4v1ahSWvLLzlNk180Q?5vw!ucyksvjG`Mx-3_zPs3z``5z*8L znD!p6AR;!pW0PnHI^sXKcj3x6`^x*W;^-+R90X}j<{gCmorJl(&i^);)~Bf1iwvX6 zGQ)|?42(1<@>E_Y@@LFvH=_9%C?F==+ln$u4hG<4BbXj>!zc@Y@(e8D)h6FvK2bevC(R) zZ~R&&MOFB7dwh?3&OXGCcc-3#i-t&nHp_kFKUMXQh|-=x2z|NuvUGYq?^1gSwFLR%*-x!bTS7XghU}c- 
zO|h`fJ5)Qr4B!KAjJlT^I54NLfh+B~!<{~b9(#?&!BbpQ^?Gw0y$qIGFvr453U%>U<2f_ON!whf;nkbNWBG_)t3H*b$ITAPz^VI_<7obc5Ejs|J zrjKpzO^)C8CI=&2EPyo-;s8e>v?WDE#Fx>Y(B;8CHaOin@W$UM1K{(Z)?o7!l6w#N zIf%f2*pTHy7Acb)Onml;#1RkC|R{sue9 zrzQgb{(oL?7oLB4EsP5I^uPKCxb%PDESkrY##k=t1qGGM;Ja6tnE$ycER{{ecP_&I zX3tflEZ^~>V@~%%R$L`2ecsL1n%V<>y{|6nk8H^e`shT^d34vy!6S7h0FBLd8lPX- z=zs`aTNqGP-nrRv8gO(gx=$z6d7M(Yv7ZxUzS%;~0NM+ok8%zrI|q8vQOvV=LbPfq zZ|oL)&je`9h7O^vKDPdtzhKIqw5x*Jm8MpiQ&B-2dlGN{vZ}C5;v7x%fV@irGsAS* z{+J&f@Dtj%$madNs`{W*xIEv_8S{YxYRe^4lwu5HYu}7USutuKC$o-ZDNp`*cd=Y) zDwQ}cON{1>kF@h$9;uE)t#fo+m32OSef6M)dewV-m&wQ}*z@%#bl}TPV~#x#r4B!l zZYn|`$%Y47dGEo`@!?`y7%BcrE|ePWbk2V958u_<{~21I0kltc0afq#MT=M9yzmbN zgJ`nXb?f6~ClMWnbmhvIzY7Ho81K=K=>Gu}hV7o>zXmzWd)%_d zvftE|6no{LFb}UOpUM`tQ6l}bzHR3X=$CfVejLRA4;RzY@n2=(|MO;2$Kv84!I-CM zX^TYS4VQ4&Rjnydk+!QrSR;Do3=ED5vl1`ay1B)>u5M>{p;rDA<=wP`@d&%z7VYW1 zjMiFT_*J*6x_s5frG9rwJFSV{VVtfH-oB1({$`Dy!u>Ifpl)51f>ZGUfxVWdO`9J> zHuqDOC_$Awi$J4EmNOym+SaS?Ymv~lI+Hg z-|VAaoAZd4K~e2!Yhr%=ykyY~qMB^Xiw2K+e>??BtDC;YlD3$UJY`qO|Gl>h7nc2{@3Mm#EACAO1sn@myZ=G%}30bR{DINmp0$Q2Zv2f>x708UA zncgNtHUY?9Fm9W!{r})Dww8 z0PwaWJ!5*KNJFA&Ojg{E8T`u1ii_UH#)hB1W(i|kdK`yyh}**Jc=Lu^{aa2Wxo;q% zMFV~-Y$s4K=W2`L~XZq|FmzP)RxHw3Sk!P=jRa_r2Gu#;*uF5q9$!tOyZJ5bK z(9CrAP6eT=N2813TEM^YAGi=z8+dB&8{Ruzw(6MNH`jORV``QUl~-_;%Y653==!zw z_@qo80ANrPTjAs>MF^7d`&B&BhjtHT{8`xb=iLMZz1l%wfeN(u`Jfb%*}n~PLE`S` zhreCqxc>>=Z?~By2Whfp$Vy+iyAw=1Oh;>o88XgoKO;Z`J7`466)HprlZ@KY%3YS? 
zaGID!U8^;%+|xTg-#&5hZ=v0u$J?~TzIyd4Q{X(CEkdC_>b(71(V{(MLz7^}3|5b; zo}Nlu2ZegBh!N(*adGD5UCgjP2TzK2bda9MS4|aXm)Uqpd9>Y{j=heo0Q$s+y+ubY zdnW#&#`JL?iD>4kk1Q0m!<48&oBItDobR8cqzEp!KDrg9rYqBR;Ar%{$a#$$pZbJH z7G)D77Z3Ku@+kL@ezcsjn~A%?aO8%c-&0hZN@pl|xgAM8Ie*KdA^22EtcLDRvh5?D z)1Cg{<@7o#r&uPlOY$hs?eSeA{Aoik&m$cZeR8tm1-qYLUw%nhH2?WYyjdrgv$c|PZyXF2b4&U2n~p7ZbzV|~8gPW%P{ z0QjIcbxZ+(1J~JQ9M1tZwWM_Euzwr@ruy1|@}84F*cDFX^;>L-%4FVc_#t-vubWPY?yLC z%ui7H=ppFOr{Q-+4ojXmu;g^k{~OvjP5&YH?+1$>aU1)7t(G#@9Gl(@MhBrSOz6fY zCRXcPK{4ey)6-eyS-XLJyeBt~17a>=;eZ(Pe<$!+~#4lQKD_ z9`ZC+=Vn~NkWI_9di3u0C^`HU95Opx7eJ$*1GeddyE1B0czzHAZFym0 zp{av|gDH=0BL}Hv_qqLNY6FpDMpBl+%j2S67{yRrGLRtJ2T{2TRQkmFGH^q#tf#~0 z#`kWpq=`ZYkp-3$6YK$3Ia*-84%E-nJFuopUj}fD(&q8i9l4IB8C%xHwKij26wgY9 zOJ65jspv1Iv<>;&3h42(gxj?Co1}zK*>e#}YIZV)jM)y1V{mPCN^&zpb{g*%PuWOR zqTPTfs%4G7_fjG0w?ApOij_sEjdgPybg7Pd$uX%};tbiuUYJt%hxerW!^%Hd z%4x_`h{4?{^;dmGUZ&$iF4;K2PGTYuqZWLs5cA>0?h-%gv;q*_n1LJy;S|m8)2e`o z!2{j-gyrgN7$y_ttxp4%PEOA_Suzh=h%NlV+q%?~EdF8%DUybC?;!3FwyU$RVKV)s z(=*0^dOe^xx7C)AI2)d%TC?c!@1tZtH<7fW{>xMB@1^3s8 zlK{8GBA`eZ)?r{#a=W;&z`<3DHVq1%>=9ETzhrw5t$T*?1;?bWY_kH$u$}tAAE`Zw ziML+uA%=gxDt924#H|`u2vAOGgykPE)vxs#8_2|_iS|4aRWa=*5>$NZf4J6|gs{4& zmv~atnjK`t$(>iBuNhLh zL%Y@oxog!QOS7?OQanqdGcG8}mRhsS9z9T!{hnB>-ujXd4UZjqcfb=P^urbX8CW$m z&#m?q*aZ%)cU5|MQ=vg=gx$7dvOzO8tfNp0Ge!iv!CjKpQZr+RXwIQyTE=F;3bDCP z&SG1pI?j4ZVNfTfS|cV?Cf8JjEUQA5-D+)$-fO~H6;;qAxYVQBdpH=49{Kr7l@4AZ z+pHc?Ugufpx%wL9MpyfqiF=#f_u-ZTN)N-Q^Kn%veOoIm?TC*xbzIIEUs6n|%u1Z+EiUJeL9wXx6&w zw0984M{;lN=q1c`ld^84eG&?ewx${@mwEAKehWe7rsuv3ea0H?5(Lt~N=ACf`6F?l z_mPfco+Zox=a`iu&&ZpFI5=d=sLL}$#x{3sQOQWYaB}Cj$gZAiumn4PF}3j5?j1R* zR|X4%ckFDajpo6_a|NCa3mtuqwWN0E1z#vvCcMT_uDb@6sqS1^dQpx)d{=gD4To?B zS&CkV>&3c*!Yb{52U$k?)Zb9ZY+UaQOc~iC3xn1J-vk8(Tb9?XJ?s4FJK-WZmO)S) zv>}sfnHmf6e3z*u$8V#j4n3Z+Yff4=*r?a{?S&=2tg3>1{`^QrY}-I#IkHZ7B6&Uk zqB0p+n)GP?-81Z07n`?+3RI@{RJfO2s%PBZZ@M#X#JPI%Y5cNQzEF^Q_kOxLxktSWo znhk%>IOoS^%low1p&IOeo>7B_3NKt;X)SGpk@`Qb@vpp)EU+O9$B@50CJ)P?v&<91gq#Mp3S`{qnj_DPnMKjr6!`=irB6x!)M+ 
zL31l%i07#nO>8gu>}p07c&)buH!Gd6cKIA{Te%k1nK8@jZP6?0XAC}VRc==oJeMWR zCE>CBXXkm9weY)fn$g(<+|2z=yu0yS_;WvIXDz^*WLO(z51jtM)4c}3b3sJ&z)A(!UXXbx;`s)zd8jJT-F5nl~Z1c?G9WzvCZr?38jUzrz6 zgMWy6+W+=F=^}5N&4wL=ahBi!&g5|7yax(ipC4fdQFpj|6l*+#OxBQN9suFS)^B0D zgVr4`P^Y<(Lbbx3$s{1*;2AEbWp$9`kDAKLGuM}yG{KRYOa6J!^^+Z%qegDM0)Zsg zy+;BUzV(niWo#=#XpK&_M*KE~cDNIU>Q~!F3Pya5ElWu5nen!)T~G9!U195LP*qos zp2-(?`csHg?Yj)nL(DzN#z6)P;OJ3hL_55V2KbXP%)EF#qY#iQG z9-n9Zyx5$93jJJ7U|G`(NBn!w59!tdBg+_BlaF)k0buDYxK)KiP+!;P+K|tAE1W@| zp$V=MDk(||;9qZY%)c8h*jqcko|gw%O}HRn?fv-GwD%Uj*=4OBMzDz|>+`XuQ0UZHu8Cgs?G zkh-~?(kKU8{%T22sacfwcK}%TaAXE-c!nOS-Vl0R)cV2Tttty>>^BF;rf&~WJ#`7*Y3c}?HZCQ0DSoHj*&iTjrJHFNjo%Rwp;wvtlv`hz7A=& zT<=MQ38UKItoX|a!?Vh!ayYo>JR#joc-#hMjCBTGzjSJ-rd81n^+nc05H|TZ~#no|A)9jJdw!u?FN%F&AsiFg{CcI zUJk_|d56YgTloDFiG`b^8Nys&ru$1Xwx= zcv+4!(4xqMc59-1y3Ob#Ue!!kDbr=X-o7975h09T80`nDq34I$7tJ7a3syqEX?xjl zUj^yqb*q-*HozDijau!usyTYpOh4>mFr%!QGwd;2e|q6Ek^i2eaXsb_`(y- zNSl3x#@SbveiY?;ocXe`^j%3RAXhcBJFoVzxS4)A(GDrc+4d1Dkm2{~zzJu`2&*8j zpmkwI!O>mL2DicPX7RNNg3s>G`j`#RR7uiLhkXR0Y+pxJkPBmD@Twnde^d!`T|1@0 zbg9^{Lk??hPD$+JoutI_vP05)?Q2F)j%aRtS(_WO;YnFEo>x%g_n@?Y#9oyBWW5g$ z(dIf_6NAAY9Qm_I_Lkt@TNZY z6`54nHx$O+LEwcC;4*AbsY!WgfYI&8jvD0FynDgc z>1x=R%#E%-83*jsZb^k0c_Sb&0)b$YG(bZLlW%R{Rr_efJ=&dTXatNmvlsz07yD86 z)0%e#I~BI{=C~G%DF}s(2>q=-+xAfKxPrsuGX%8sz@&Z_aAu$e^$9WjMzcxb2PbGi zTT%ujZ1KwzwbAG5na>=&|Ma)bMoH^o+|M|%V_*V!3+$~=pHKYwOB)9aTeHYZirVWS z3PWzwh{b=okgD@>_}W8<`G)~=Z#uwooU#(scqu>hm$QG36>%IM9ByH#2zeQ6WDb#K zd!ps$%h;sX$tx@JzvKgR;5F@wp&8DRQTa3?KSLKk;?QrJ2t=I`Oa~^ENRZPtQO|hk zNLC%rd4D=pdx@WwHX02pHMKqmO>At=x9=Ihtk=`Q3w`T$e_NNzN&uJc{lhwiqMf|B#J0h9a4|Pn*K_o?>C4{a zOQ3;=6KA`pquKNKzgL!sUz=R-VB28tI)0^><8^9Le=Uct-5mk9)y88I|Ft<_MqAWy zCVTvS+SjZ=$}7**{QpI~3^Dk^tcdwZ*grh|NZEeakRX35BW4C^ZZF=*^s?dk^M!=f zMC@^gE2>@HTRz=s4a0(tQ7l(4#cQhDofj+aOdc2woqY1iG1{J&!#^53P<`~B#S@j$ zkQ=9*_T$rJ2UcR4-RM-2TxSaR&nq&?oz^{qXSK;444Zm)#!K;K8cjBgu&5X`8od`8 zirVimV0&XJ!y!z%!8r2HVX4wc0XH$VV9yE8nfPJF(WosiGK@n}JRYxlzdk%6GpHx~ 
zs+k*}GfVwaP1;vB^vVI&*En&ebm5bT*@73Isav$cc6Aae0IuRQx;wO#ezFvkE!$)fdJ&s{fBYApJfXzqAM1}M_MjjEXj}hH>JT^rIKWA(>gtZ(1l|92^j?;r=4gbI z$Yi-@4-gveb5wqBjc(;F8*~|j8gtBD()EXHU+2msywJJuCIkZfK8e8JI&Le2L)H+nJ2SSD zGcpQpKah2+PcG-o2g}%`Z&$|)XHCj%AUv#7^27H~5=Rakflh;nxaNFV|dp5%*}3X;2V*FWSfL8L+FWYws*r03nj% z_exK>)N${PD=gT~rRb{iV@S_cr9$r}Dr4%Mf=h>8!$b z-8aj16;qAki1QWp!Ozt-AHzuy0gSW+H3k92q(ixl1VCKpI$u&?58&TAz^66VMD+)p z0RNgaV4ys^LZ#U|(uZ%C3V$9eovpyiedKVzn+s?a!NNtf1UYUWK3XC&(sFXbz3-!a z^}+3PntCr%;p+=yxeLvFoEV=ZqKF43=!R)xjfmbIBvL=>GKeeDWTnSGLgPY-^egsb zl}+ahS>(mN6@?+eqPzmJm*(`L3t_ekG3$XnJ#G$f$b6@v2ob#(Ab8T|Esemm$a#l`I%~7V$u>8COnmXraUd`n@)zIVd>m2zHfz%7E`RP{`%%*zwQ{YnNd zsU20d?-?5Z8^e{*=M?{4ZfF1+J&xXd7bRZn9cmzj_Yg20P*AFQANBfRPM?L0N`sA> z7Fd!!&Um$2kHV|qQ|w~GF+sOg=wp}04sc@T0;*m*b91YG;~xpvzf>>nY%S~^oC9IR VE*nv_*ss9=sIIY2xwg~2{{YOj6(Ils literal 0 HcmV?d00001 diff --git a/doc/arch/cache/ram-cache.en.rst b/doc/arch/cache/ram-cache.en.rst new file mode 100644 index 00000000..b0b15e1f --- /dev/null +++ b/doc/arch/cache/ram-cache.en.rst @@ -0,0 +1,88 @@ +.. Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, + software distributed under the License is distributed on an + "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied. See the License for the + specific language governing permissions and limitations + under the License. + +.. 
The rationale for emphasizing hit rate over byte hit rate is that the overhead of pulling more bytes from secondary storage is low compared to the cost of a request.
The purpose is to make the cache scan resistant, which means that the cache state must not be affected at all by a long sequence of Get and Put operations on objects which are seen only once. This is essential; without it not only would the cache be polluted, but it could lose critical information about the objects that it cares about. It is therefore essential that the Cache and History lists are not affected by Get or Put operations on objects seen the first time. The Seen Hash maintains a set of 16 bit hash tags, and requests which do not hit in the object cache (are in the Cache List or History List) and do not match the hash tag result in the hash tag being updated but are otherwise ignored.
First we check the Object Hash to see if the object is in the Cached List or History.
your mileage will vary enormously
Beyond avoiding the question of tier policy, the design is also intended
Treat as `READ` if possible but if the object turns out not to be in the cache treat as `WRITE`.
This is a cache to cache copy, not from the origin server. In this +case tiers marked `RW` that are not tested for read will not receive any data and will not be further involved in the +request processing. + +For a cache miss, all tiers marked `WRITE` will receive data from the origin server connection (if successful). + +This means, among other things, that if there is a tier with the object all other tiers that are written will get a +local copy of the object, the origin server will not be used. In terms of implementation, currently a cache write to a +volume is done via the construction of an instance of :cpp:class:`CacheVC` which recieves the object stream. For tiered storage the +same thing is done for each target volume. + +For cache volume overrides (e.g. via :file:`hosting.config`) this same process is used except with only the volumes +stripes contained within the specified cache volume. + +------- +Copying +------- + +It may be necessary to provide a mechanism to copy objects between tiers outside of a client originated transaction. In +terms of implementation this is straight forward using :cpp:class:`HttpTunnel` as if in a transaction only using a :cpp:class:`CacheVC` +instance for both the producer and consumer. The more difficult question is what event would trigger a possible copy. A +signal could be provided whenever a volume directory entry is deleted although it should be noted that the object in +question may have already been evicted when this event happens. + +---------------- +Additional Notes +---------------- + +As an example use, it would be possible to have only one cache volume that uses tiered storage for a particular set of +domains using volume tagging. :file:`hosting.config` would be used to direct those domains to the selected cache volume. +The oracle would check the URL in parallel and return `NO_SALE` for the tiers in the target cache volume for other +domains. 
For the other tier (that of the unmarked storage units) the oracle would return `RW` for the tier in all cases +as that tier would not be queried for the target domains. diff --git a/doc/arch/hacking/config-var-impl.en.rst b/doc/arch/hacking/config-var-impl.en.rst new file mode 100644 index 00000000..75a80e18 --- /dev/null +++ b/doc/arch/hacking/config-var-impl.en.rst @@ -0,0 +1,283 @@ +.. Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, + software distributed under the License is distributed on an + "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied. See the License for the + specific language governing permissions and limitations + under the License. + +.. Referenced source files + +.. |RecCore.cc| replace:: ``RecCore.cc`` + +.. _RecCore.cc: https://github.com/apache/trafficserver/blob/master/lib/records/RecCore.cc + +.. |RecordsConfig.cc| replace:: ``RecordsConfig.cc`` + +.. _RecordsConfig.cc: https://github.com/apache/trafficserver/blob/master/mgmt/RecordsConfig.cc + +.. |ts.h.in| replace:: ``ts.h.in`` + +.. _ts.h.in: https://github.com/apache/trafficserver/blob/master/proxy/api/ts/ts.h.in + +.. |InkAPI.cc| replace:: ``InkAPI.cc`` + +.. _InkAPI.cc: https://github.com/apache/trafficserver/blob/master/proxy/api/InkAPI.cc + +.. |InkAPITest.cc| replace:: ``InkAPITest.cc`` + +.. _InkAPITest.cc: https://github.com/apache/trafficserver/blob/master/proxy/api/InkAPITest.cc + +.. Referenced enumeration values + +.. |RECU_DYNAMIC| replace:: ``RECU_DYNAMIC`` + +.. 
Type of record. The valid values are
``RECU_RESTART_TS`` + The :ref:`traffic_server` process must be restarted for a new value to take effect. + + ``RECU_RESTART_TM`` + The :ref:`traffic_manager` process must be restarted for a new value to take effect. + + ``RECU_RESTART_TC`` + The :ref:`traffic_cop` process must be restarted for a new value to take effect.
:ref:`more specialized way +<http-config-var-impl>` (built on top of the generic mechanism)
This is used to generate the default :file:`records.config`. Just add the variable to the file in an appropriate place with a proper default as this will now override whatever default you put in the code for new installs. + +------------------------------ +Handling Updates +------------------------------ + +The simplest mechanism for handling updates is the ``REC_EstablishStaticConfigXXX`` family of functions. This mechanism +will cause the value in the indicated instance to be updated in place when an update to :file:`records.config` occurs. +This is done asynchronously using atomic operations. Use of these variables must keep that in mind. Adding ``volatile`` to the declaration is likely to be a good idea. + +If a variable requires additional handling when updated a callback can be registered which is called when the variable +is updated. This is what the ``REC_EstablishStaticConfigXXX`` calls do internally with a callback that simply reads the +new value and writes it to storage indicated by the call parameters. The functions used are the ``link_XXX`` static +functions in |RecCore.cc|_. + +To register a configuration variable callback, call ``RecRegisterConfigUpdateCb`` with the arguments + +``char const*`` *name* + The variable name. + +*callback* + A function with the signature ````. The *name* + value passed is the same as the *name* passed to the registration function as is the *cookie* argument. The *type* and + *data* are the new value for the variable. The return value is currently ignored. For future compatibility return + ``REC_ERR_OKAY``. + +``void*`` *cookie* + A value passed to the *callback*. This is only for the callback, the internals simply store it and pass it on. + +*callback* is called under lock so it should be quick and not block. If that is necessary a continuation should be +scheduled to handle the required action. + +.. note:: + The callback occurs asynchronously. 
For HTTP variables as described in the next section, this is handled by the more + specialized HTTP update mechanisms. Otherwise it is the implementor's responsibility to avoid race conditions. + +.. _http-config-var-impl: + +------------------------- +HTTP Configuration Values +------------------------- + +Variables used for HTTP processing should be declared as members of the ``HTTPConfigParams`` structure (but :ref:`see +`) and use the specialized HTTP update mechanisms which handle synchronization and +initialization issues. + +The configuration logic maintains two copies of the ``HTTPConfigParams`` structure - the master copy and the current +copy. The master copy is kept in the ``m_master`` member of the ``HttpConfig`` singleton. The current copy is kept in +the ConfigProcessor. The goal is to provide a (somewhat) atomic update for configuration variables which are loaded +individually into the master copy as updates are received and then bulk copied to a new instance which is then swapped +in as the current copy. The HTTP state machine interacts with this mechanism to avoid race conditions. + +For each variable a mapping between the variable name and the appropriate member in the master copy should be +established in the ``HTTPConfig::startup`` method. The ``HttpEstablishStaticConfigXXX`` functions should be used +unless there is a strong, explicit reason to not do so. + +The ``HTTPConfig::reconfigure`` method handles the current copy of the HTTP configuration variables. Logic should be +added here to copy the value from the master copy to the current copy. Generally this will be a simple assignment. If +there are dependencies between variables those should be enforced / checked in this method. + +.. 
_overridable-config-vars: + +----------------------- +Overridable Variables +----------------------- + +HTTP related variables that are changeable per transaction are stored in the ``OverridableHttpConfigParams`` structure, +an instance of which is the ``oride`` member of ``HTTPConfigParams`` and therefore the points in the previous section +still apply. The only difference for that is the further ``.oride`` in the structure references. + +In addition the variable is required to be accessible from the transaction API. In addition to any custom API functions +used to access the value, the following items are required for generic access + +#. Add a value to the ``TSOverridableConfigKey`` enumeration in |ts.h.in|_. + +#. Augment the ``TSHttpTxnConfigFind`` function to return this enumeration value when given the name of the configuration + variable. Be sure to count the characters very carefully. + +#. Augment the ``_conf_to_memberp`` function in |InkAPI.cc|_ to return a pointer to the appropriate member of + ``OverridableHttpConfigParams`` and set the type if not a byte value. + +#. Update the testing logic in |InkAPITest.cc|_ by adding the string name of the configuration variable to the + ``SDK_Overridable_Configs`` array. diff --git a/doc/arch/hacking/index.en.rst b/doc/arch/hacking/index.en.rst new file mode 100644 index 00000000..84e997a6 --- /dev/null +++ b/doc/arch/hacking/index.en.rst @@ -0,0 +1,30 @@ +Hacking +******* + +.. Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. 
You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, + software distributed under the License is distributed on an + "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied. See the License for the + specific language governing permissions and limitations + under the License. + +Introduction +------------ + +This is a documentation stub on how to hack Apache Traffic Server. Here we try to document things such as how to write +and run unit or regression tests or how to inspect the state of the core with a debugger. + +.. toctree:: + :maxdepth: 1 + + config-var-impl.en diff --git a/doc/arch/index.en.rst b/doc/arch/index.en.rst new file mode 100644 index 00000000..6cc9fac8 --- /dev/null +++ b/doc/arch/index.en.rst @@ -0,0 +1,37 @@ +Architecture and Hacking +************************ + +.. Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, + software distributed under the License is distributed on an + "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied. See the License for the + specific language governing permissions and limitations + under the License. + +Introduction +-------------- + +The original architectural documents for Traffic Server were lost in the transition to an open source project. The +documents in this section are provisional and were written based on the existing code. 
The purpose is to have a high +level description of aspects of Traffic Server to better inform ongoing work. + +In the final section on "hacking" we try to document our approaches to understanding and modifying the source. + +Contents: + +.. toctree:: + :maxdepth: 2 + + cache/cache.en + proposals/hostdb.en + hacking/index.en diff --git a/doc/arch/proposals/hostdb.en.rst b/doc/arch/proposals/hostdb.en.rst new file mode 100644 index 00000000..16a6b4ea --- /dev/null +++ b/doc/arch/proposals/hostdb.en.rst @@ -0,0 +1,168 @@ +Host Resolution +****************** + +.. Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, + software distributed under the License is distributed on an + "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied. See the License for the + specific language governing permissions and limitations + under the License. + +Introduction +-------------- + +The current mechanism for resolving host names to IP addresses for Traffic Server is contained in the HostDB and DNS +libraries. These take hostnames and provide IP addresses for them. + +The current implementation is generally considered inadequate, both from a functionality point of view and difficulty in +working with it in other parts of Traffic Server. As Traffic Server is used in more complex situations this inadequacy +presents increasing problems. 
+ +Goals +----- + +Updating the host name resolution (currently referred to as "HostDB") has several functional goals + +* Enable additional processing layers to be easily added. +* Enable plugins to directly access the name resolution logic +* Enable plugins to provide name resolution +* Asynchronous (immediate resolve or callback on block) +* Minimize allocations -- in particular no allocations for cached resolutions +* Simplify interactions with the resolution, particularly with regard to nameservers, origin server failover, and + address family handling. + +It is also necessary to support a number of specific features that are either currently available or strongly desired. + +* SplitDNS or its equivalent +* Use of a hosts file (e.g. ``/etc/hosts``) +* Simultaneous IPv4 and IPv6 queries +* IP family control +* Negative caching + * Server connection failures + * Query failures + * Nameserver failures. +* Address validity time out control +* Address round robin support +* SRV record support (weighted records) +* Nameserver round robin +* Plugin access to nameserver data (add, remove, enumerate) +* Plugin provision of resolvers. +* Hooks for plugin detection / recovery from resolution events. + +One issue is persistence of the cached resolutions. This creates problems for the current implementation (because of +size limits it imposes on the cached data) but also allows for quicker restarts in a busy environment. + +Basics +------ + +The basic design is to separate the functionality into chainable layers so that a resolver with the desired attributes +can be assembled from those layers. The core interface is that of a lazy iterator. This object returns one of four +results when asked for an address + +* An IP address +* Done (no more addresses are available) +* Wait (an address may be available in the future) +* Fail (no address is available and none will be so in the future) + +Each layer (except the bottom) uses this API and also provides it. 
This enables higher level logic such as the state +machine to simply use the resolver as a list without having to backtrack states in the case of failures, or have special +cases for different resolution sources. + +To perform a resolution, a client creates a query object (potentially on the stack), initializes it with the required +data (at least the hostname) and then starts the resolution. Methods on the query object allow its state and IP address +data to be accessed. + +Required Resolvers +------------------------ + +Nameserver + A bottom level resolver that directly queries a nameserver for DNS data. This contains much of the functionality + currently in the ``iocore/dns`` directory. + +SplitDNS + A resolver that directs requests to one of several resolvers. To emulate current behavior these would be Nameserver + instances. + +NameserverGroup + A grouping mechanism for Nameserver instances that provides failover, round robin, and ordering capabilities. It may be + reasonable to merge this with the SplitDNS resolver. + +HostFile + A resolver that uses a local file to resolve names. + +AddressCache + A resolver that also has a cache for resolution results. It requires another resolver instance to perform the actual + resolution. + +Preloaded + A resolver that can contain one or more explicitly set IP addresses which are returned. When those are exhausted it + falls back to another resolver. + +Configuration +------------- + +To configure the resolution, each resolver would be assigned a tag. It is not, however, sufficient to simply provide +the list of resolver tags because some resolvers require additional configuration. Unfortunately this will likely +require a separate configuration file outside of :file:`records.config`, although we would be able to remove +:file:`splitdns.config`. In this case we would need chain start / end markers around a list of resolver tags. Each tag +would then be able to take additional resolver configuration data. 
For instance, for a SplitDNS resolver the nameservers. + +Examples +-------- + +Transparent operations would benefit from the *Preloaded* resolver. This would be loaded with the origin host address +provided by the client connection. This could be done early in processing and then no more logic would be required to +skip DNS processing as it would happen without additional action by the state machine. It would handle the problem of de +facto denial of service if an origin server becomes unavailable in that configuration, as *Preloaded* would switch to +alternate addresses automatically. + +Adding host file access would be easier as well, as it could be done in a much more modular fashion and then added to +the stack at configuration time. Whether such addresses were cached would be controlled by chain arrangement rather than yet +more configuration knobs. + +The default configuration would be *Preloaded* : *AddressCache* : *Nameserver*. + +In all cases the state machine makes requests against the request object to get IP addresses as needed. + +Issues +------ + +Request object allocation +========================= + +The biggest hurdle is being able to unwind a resolver chain when a block is encountered. There are some ways to deal with this. + +1) Set a maximum resolver chain length and declare the request instance so that there is storage for state for that many +resolvers. If needed, an additional value of maximum storage per chain could be set as well. The expected number of +elements in a chain is expected to be limited, 10 would likely be a reasonable limit. If settable at source +configuration time this should be sufficient. + +2) Embed class allocators in resolver chains and mark the top / outermost / first resolver. The maximum state size for a +resolution can be calculated when the chain is created and then the top level resolver can use an allocation pool to +efficiently allocate request objects. 
This has an advantage that with a wrapper class the request object can be passed +along cheaply. Whether that's an advantage in practice is unclear. + +Plugin resolvers +================ + +If plugins can provide resolvers, how can these can integrated in to existing resolver chains for use by the HTTP SM for +instance? + +Feedback +======== + +It should be possible for a client to provide feedback about addresses (e.g., the origin server at this address is not +available). Not all resolvers will handle feedback but some will and that must be possible. + +Related to this is that caching resolvers (such as *AddressCache*) must be able to iterator over all resolved addresses +even if their client does not ask for them. In effect they must background fill the address data. diff --git a/doc/checkvers.py b/doc/checkvers.py new file mode 100644 index 00000000..cda51399 --- /dev/null +++ b/doc/checkvers.py @@ -0,0 +1,41 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys, os + +if __name__ == '__main__': + # Use optparse instead of argparse because this needs to work on old Python versions. 
+ import optparse + + parser = optparse.OptionParser(description='Traffic Server Sphinx docs configuration') + parser.add_option('--check-version', action='store_true', dest='checkvers') + + (options, args) = parser.parse_args() + + # Check whether we have a recent version of sphinx. EPEL and CentOS are completely crazy and I don't understand their + # packaging at all. The test below works on Ubuntu and places where sphinx is installed sanely AFAICT. + if options.checkvers: + print 'checking for sphinx version >= 1.1... ', + try: + import sphinx + version = sphinx.__version__ + (major, minor, micro) = version.split('.') + if (int(major) > 1) or (int(major) == 1 and int(minor) >= 1): + print 'found ' + sphinx.__version__ + sys.exit(0) + except Exception as e: + print e + sys.exit(1) diff --git a/doc/conf.py b/doc/conf.py new file mode 100644 index 00000000..b4f59ae5 --- /dev/null +++ b/doc/conf.py @@ -0,0 +1,381 @@ +# -*- coding: utf-8 -*- +# +# Apache Traffic Server documentation build configuration file, created by +# sphinx-quickstart on Mon Mar 4 06:23:15 2013. +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This file is execfile()d with the current directory set to its containing dir. 
+# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +import sys, os + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +#sys.path.insert(0, os.path.abspath('.')) +sys.path.insert(0, os.path.abspath('ext')) +sys.path.insert(0, os.path.abspath('.')) + +from manpages import man_pages + +# -- General configuration ----------------------------------------------------- + +# If your documentation needs a minimal Sphinx version, state it here. +#needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be extensions +# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. +extensions = [ + 'sphinx.ext.graphviz', + 'sphinx.ext.intersphinx', + 'sphinx.ext.autodoc', + 'sphinx.ext.todo', + 'sphinx.ext.coverage', + 'sphinx.ext.pngmath', + 'sphinx.ext.viewcode', + 'traffic-server', +] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix of source filenames. +source_suffix = '.rst' + +# The encoding of source files. +#source_encoding = 'utf-8-sig' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = u'Apache Traffic Server' +copyright = u'2014, dev@trafficserver.apache.org' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# NOTE: The build system automatically sets the version correctly, but external +# sphinx builders (like readthedocs.org) do not, so we still have to manually +# update a reasonable version number here. 
+# +# The short X.Y version. +version = '5.0' +# The full version, including alpha/beta/rc tags. +release = '5.0.x' + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +#language = None +locale_dirs = ['locale/'] +gettext_compact = False + +## HACK for Read-the-Docs +## Generate .mo files just in time +if os.environ.get('READTHEDOCS') == 'True': + import polib + print "Generating .mo files", + for locale_dir in locale_dirs: + for path, dummy, filenames in os.walk(locale_dir): + for filename in filenames: + po_file = os.path.join(path, filename) + base, ext = os.path.splitext(po_file) + if ext == ".po": + mo_file = base + ".mo" + po = polib.pofile(po_file) + po.save_as_mofile(fpath=mo_file) + print "done" +## End of HACK + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +#today = '' +# Else, today_fmt is used as the format for a strftime call. +#today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +exclude_patterns = [] + +# The reST default role (used for this markup: `text`) to use for all documents. +#default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +#add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +#add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +#show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# A list of ignored prefixes for module index sorting. +#modindex_common_prefix = [] + +nitpicky=1 + +# -- Options for HTML output --------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. 
See the documentation for +# a list of builtin themes. +#html_theme = 'agogo' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +#html_theme_options = {} + +# Add any paths that contain custom themes here, relative to this directory. +#html_theme_path = [] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +#html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +#html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +html_logo = 'static/images/trans_logo_tm_380x69.png' + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +html_favicon = 'static/images/favicon.ico' + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['static'] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +#html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +#html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +#html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +#html_additional_pages = {} + +# If false, no module index is generated. +#html_domain_indices = True + +# If false, no index is generated. +#html_use_index = True + +# If true, the index is split into individual pages for each letter. 
+#html_split_index = False + +# If true, links to the reST sources are added to the pages. +#html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +#html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +#html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +#html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +#html_file_suffix = None + +# Output file base name for HTML help builder. +htmlhelp_basename = 'ApacheTrafficServerdoc' + +# -- Options for LaTeX output -------------------------------------------------- + +latex_elements = { +# The paper size ('letterpaper' or 'a4paper'). +#'papersize': 'letterpaper', + +# The font size ('10pt', '11pt' or '12pt'). +#'pointsize': '10pt', + +# Additional stuff for the LaTeX preamble. +#'preamble': '', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, author, documentclass [howto/manual]). +latex_documents = [ + ('index', 'ApacheTrafficServer.tex', u'Apache Traffic Server Documentation', + u'dev@trafficserver.apache.org', 'manual'), +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +#latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +#latex_use_parts = False + +# If true, show page references after internal links. +#latex_show_pagerefs = False + +# If true, show URL addresses after external links. +#latex_show_urls = False + +# Documents to append as an appendix to all manuals. +#latex_appendices = [] + +# If false, no module index is generated. 
+#latex_domain_indices = True + +# -- Options for manual page output -------------------------------------------- + +# The global "man_pages" is imported from ts/manpages.py + +# If true, show URL addresses after external links. +#man_show_urls = False + +# Get the manual page description from the reStructuredText document. +# This keeps the list of manual pages consistent with the source +# documents and includes the same brief description in both the HTML +# and manual page outputs. + +from docutils import nodes +from docutils.transforms import frontmatter +from sphinx.writers import manpage + +# Override ManualPageWriter and ManualPageTranslator in the only way +# that Sphinx supports + +BaseWriter = manpage.ManualPageWriter +class ManualPageWriter(BaseWriter): + def translate(self): + transform = frontmatter.DocTitle(self.document) + + section, index = transform.candidate_index(self.document) + if index: + + # A sentence after the title is the manual page description + if len(section) > 1 and isinstance(section[1], nodes.paragraph): + + description = section.pop(1).astext() + description = description[:1].lower() + description[1:] + description = description.rstrip('.') + + self.document.settings.subtitle = description + + # Instead of section_level = -1, use the standard Docutils + # DocTitle transform to hide the top level title + transform.promote_title(self.document) + + # The title is the manual page name + transform.set_metadata() + + BaseWriter.translate(self) + +manpage.ManualPageWriter = ManualPageWriter + +BaseTranslator = manpage.ManualPageTranslator +class ManualPageTranslator(BaseTranslator): + def __init__(self, builder, *args, **kwds): + BaseTranslator.__init__(self, builder, *args, **kwds) + + # Instead of section_level = -1, use the standard Docutils + # DocTitle transform to hide the top level title + self.section_level = 0 + +manpage.ManualPageTranslator = ManualPageTranslator + +# -- Options for Texinfo output 
------------------------------------------------ + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + ('index', 'ApacheTrafficServer', u'Apache Traffic Server Documentation', + u'dev@trafficserver.apache.org', 'ApacheTrafficServer', 'One line description of project.', + 'Miscellaneous'), +] + +# Documents to append as an appendix to all manuals. +#texinfo_appendices = [] + +# If false, no module index is generated. +#texinfo_domain_indices = True + +# How to display URL addresses: 'footnote', 'no', or 'inline'. +#texinfo_show_urls = 'footnote' + +# -- Options for Epub output --------------------------------------------------- + +# Bibliographic Dublin Core info. +epub_title = u'Apache Traffic Server' +epub_author = u'dev@trafficserver.apache.org' +epub_publisher = u'dev@trafficserver.apache.org' +epub_copyright = u'2013, dev@trafficserver.apache.org' + +# The language of the text. It defaults to the language option +# or en if the language is not set. +#epub_language = '' + +# The scheme of the identifier. Typical schemes are ISBN or URL. +#epub_scheme = '' + +# The unique identifier of the text. This can be a ISBN number +# or the project homepage. +#epub_identifier = '' + +# A unique identification for the text. +#epub_uid = '' + +# A tuple containing the cover image and cover page html template filenames. +#epub_cover = () + +# HTML files that should be inserted before the pages created by sphinx. +# The format is a list of tuples containing the path and title. +#epub_pre_files = [] + +# HTML files shat should be inserted after the pages created by sphinx. +# The format is a list of tuples containing the path and title. +#epub_post_files = [] + +# A list of files that should not be packed into the epub file. +#epub_exclude_files = [] + +# The depth of the table of contents in toc.ncx. 
+#epub_tocdepth = 3 + +# Allow duplicate toc entries. +#epub_tocdup = True +mathjax_path = 'https://docs.trafficserver.apache.org/__RTD/MathJax.js' diff --git a/doc/doap.rdf b/doc/doap.rdf new file mode 100644 index 00000000..b3cdef6c --- /dev/null +++ b/doc/doap.rdf @@ -0,0 +1,64 @@ + + + + + + 2011-04-08 + + Apache Traffic Server + + + Apache Traffic Server is an HTTP proxy cache. + Apache Traffic Server is fast, scalable and extensible HTTP/1.1 compliant caching proxy server. ATS can be used as a reverse, forward or even transparent HTTP proxy. + + + + + C + + + + trafficserver + 2012-03-21 + 3.0.4 + + + + + + + + + + + Leif Hedstrom + + + + + HTTP/1.1 + IETF + 2616 + + + + diff --git a/doc/dot/ResponseDiag.dot b/doc/dot/ResponseDiag.dot new file mode 100644 index 00000000..9fed3e03 --- /dev/null +++ b/doc/dot/ResponseDiag.dot @@ -0,0 +1,70 @@ +/* response processing diagram HttpSM */ +/* */ + +digraph g { +center=1; +size = "7.5,10"; +fontsize="20"; +label = "Inktomi CONFIDENTIAL"; +orientation = "portrait"; + +"READ_RESP_HDR" [ label = "Read Response Header" ]; +"CALL_HANDLE_RESP" [ label = "Call Handle Response" ]; + +/* Proxy Internal Nodes */ +"PROXY_INTERNAL" [ label = "PROXY_INTERNAL*" ]; +"PI_HANDLE_CACHE_WORK" [ label = "handle cache work" ]; +"PI_RESP_HDR_API" [label = "call Send Response Hdr Hook" ]; +"PI_INTERNAL_TRANSFER" [ label = "Internal Transfer" ]; + +/* Server Read Nodes */ +"SERVER_READ" [ label = "SERVER_READ*" ]; +"SER_HANDLE_CACHE_WORK" [ label = "handle cache work" ]; +"SER_TRANSFORM_Q" [ label = "transform?", shape = "diamond" ]; +"SER_NO_T_RESP_HDR_API" [label = "call Send Response Hdr Hook" ]; +"SER_NO_T_SERVER_TRANSFER" [ label = "Server Transfer" ]; + +/* Transform Nodes */ +"INIT_XFER_TO_TRANS" [label = "Initiate Transfer to Transform" ]; +"WAIT_FOR_READ_READY" [label = "Wait for Transform Read Ready" ]; +"TRANFORM_RESP_HDR_API" [label = "call Send Response Hdr Hook" ]; +"TRANSFORM_TRANSFER" [ label = "Transform Transfer" ]; + +/* 
Cache Read Nodes */ +"CACHE_READ" [ label = "CACHE_READ*" ]; +"CA_HANDLE_CACHE_WORK" [ label = "handle cache work" ]; +"CA_TRANSFORM_Q" [ label = "transform?", shape = "diamond" ]; +"CA_NO_T_RESP_HDR_API" [label = "call Send Response Hdr Hook" ]; +"CA_NO_T_SERVER_TRANSFER" [ label = "Cache Transfer" ]; + +/* Common path */ +"READ_RESP_HDR" -> "CALL_HANDLE_RESP"; +"CALL_HANDLE_RESP" -> "SERVER_READ"; +"CALL_HANDLE_RESP" -> "CACHE_READ"; +"CALL_HANDLE_RESP" -> "PROXY_INTERNAL"; + +/* Proxy Internal Path */ +"PROXY_INTERNAL" -> "PI_HANDLE_CACHE_WORK"; +"PI_HANDLE_CACHE_WORK" -> "PI_RESP_HDR_API"; +"PI_RESP_HDR_API" -> "PI_INTERNAL_TRANSFER"; + +/* Server Read Path */ +"SERVER_READ" -> "SER_HANDLE_CACHE_WORK"; +"SER_HANDLE_CACHE_WORK" -> "SER_TRANSFORM_Q"; +"SER_TRANSFORM_Q" -> "SER_NO_T_RESP_HDR_API" [ label = "no" ]; +"SER_NO_T_RESP_HDR_API" -> "SER_NO_T_SERVER_TRANSFER"; + +/* Cache Read Path" */ +"CACHE_READ" -> "CA_HANDLE_CACHE_WORK"; +"CA_HANDLE_CACHE_WORK" -> "CA_TRANSFORM_Q"; +"CA_TRANSFORM_Q" -> "CA_NO_T_RESP_HDR_API" [ label = "no" ]; +"CA_NO_T_RESP_HDR_API" -> "CA_NO_T_SERVER_TRANSFER"; + + +/* Transform Path */ +"SER_TRANSFORM_Q" -> "INIT_XFER_TO_TRANS" [ label = "yes" ]; +"CA_TRANSFORM_Q" -> "INIT_XFER_TO_TRANS" [ label = "yes" ]; +"INIT_XFER_TO_TRANS" -> "WAIT_FOR_READ_READY"; +"WAIT_FOR_READ_READY" -> "TRANFORM_RESP_HDR_API"; +"TRANFORM_RESP_HDR_API" -> "TRANSFORM_TRANSFER"; +} \ No newline at end of file diff --git a/doc/dot/SimpleStateDiag.dot b/doc/dot/SimpleStateDiag.dot new file mode 100644 index 00000000..0a49b691 --- /dev/null +++ b/doc/dot/SimpleStateDiag.dot @@ -0,0 +1,65 @@ +/* A simplified state diagram for HttpSM */ +/* */ + +digraph g { +center=1; +size = "7.5,10"; +fontsize="20"; +label = "Inktomi CONFIDENTIAL"; +orientation = "portrait"; + +"ACCEPT" [ label = "accept" ]; +"RD_REQ_HDRS" [ label = "Read Req Headers" ]; +"DNS" [ label = "DNS" ]; +"C_LOOKUP" [ label = "Cache Lookup" ]; +"CACHE_LOCK" [ label = "Lock URL in Cache" ]; 
+"PICK_ADDR" [ label = "Pick Address" ]; +"CACHE_MATCH" [ label = "Cache Match" ]; +"CACHE_FRESH" [ label = "Cache Fresh" ]; +"SND_REQ_HDRS" [ label = "Send Req Headers" ]; +"SETUP_C_READ" [ label = "Setup Cache Read" ]; +"SND_C_HDRS" [ label = "Send Cached Headers" ]; +"CONNECT" [ label = "Try Connect" ]; +"SND_REQ_HDRS" [ label = "Send Req Headers" ]; +"RD_REP_HDRS" [ label = "Read Reply Headers" ]; +"VALID" [ label = "Check Valid" ]; +"SETUP_S_READ" [ label = "Setup Server Read" ]; +"SETUP_CACHE_WRITE" [ label = "Setup Cache Write" ]; +"SETUP_TRANS" [ label = "Setup Transform" ]; +"SETUP_REQ_TRANS" [ label = "Setup Request Transform" ]; +"SETUP_BODY_READ" [ label = "Setup POST/PUT Read" ]; +"TUNNEL" [ label = "Tunnel Response" ]; +"TUNNEL_REQ" [ label = "Tunnel Request Body" ]; + + +"ACCEPT" -> "RD_REQ_HDRS"; +"RD_REQ_HDRS" -> "DNS"; +"DNS" -> "C_LOOKUP"; +"C_LOOKUP" -> "CACHE_MATCH" [ label = "hit" ]; +"C_LOOKUP" -> "CACHE_LOCK" [ label = "miss" ]; +"CACHE_LOCK" -> "PICK_ADDR"; +"CACHE_MATCH" -> "CACHE_LOCK" [ label = "no match" ]; +"CACHE_MATCH" -> "CACHE_FRESH" [ label ="match" ]; +"CACHE_FRESH" -> "SND_C_HDRS" [ label ="fresh" ]; +"CACHE_FRESH" -> "CACHE_LOCK" [ label ="stale" ]; +"SND_C_HDRS" -> "SETUP_C_READ"; +"SETUP_C_READ" -> "SETUP_TRANS"; +"PICK_ADDR" -> "CONNECT"; +"CONNECT" -> "PICK_ADDR" [ label = "fail" ]; +"CONNECT" -> "SND_REQ_HDRS" [ label = "success" ]; +"SND_REQ_HDRS" -> "SETUP_BODY_READ" [label = "POST/PUT" ]; +"SETUP_BODY_READ" -> "SETUP_REQ_TRANS"; +"SETUP_REQ_TRANS" -> "TUNNEL_REQ"; +"TUNNEL_REQ" -> "RD_REP_HDRS"; +"SND_REQ_HDRS" -> "RD_REP_HDRS" [label = "GET" ]; +"RD_REP_HDRS" -> "VALID"; +"VALID" -> "PICK_ADDR" [ label = "no" ]; +"VALID" -> "SETUP_S_READ" [ label = "yes" ]; +"SETUP_S_READ" -> "SETUP_TRANS" [ label = "Uncachable" ]; +"SETUP_S_READ" -> "SETUP_CACHE_WRITE" [ label = "Cachable" ]; +"SETUP_CACHE_WRITE" -> "SETUP_TRANS"; +"SETUP_TRANS" -> "TUNNEL"; +"TUNNEL" -> "ACCEPT"; + +} + diff --git a/doc/dot/SimpleStateDiagAPI.dot 
b/doc/dot/SimpleStateDiagAPI.dot new file mode 100644 index 00000000..2a2f1d0d --- /dev/null +++ b/doc/dot/SimpleStateDiagAPI.dot @@ -0,0 +1,86 @@ +/* A simplified state diagram for HttpSM with API callout points */ +/* */ + + +digraph g { +center=1; +size = "7.5,10"; +fontsize="20"; +label = "Inktomi CONFIDENTIAL"; +orientation = "portrait"; + +"ACCEPT" [ label = "Accept" ]; +"RD_REQ_HDRS" [ label = "Read Req Headers" ]; +"DNS" [ label = "DNS" ]; +"C_LOOKUP" [ label = "Cache Lookup" ]; +"CACHE_LOCK" [ label = "Lock URL in Cache" ]; +"PICK_ADDR" [ label = "Pick Address" ]; +"CACHE_MATCH" [ label = "Cache Match" ]; +"CACHE_FRESH" [ label = "Cache Fresh" ]; +"SND_REQ_HDRS" [ label = "Send Req Headers" ]; +"SETUP_C_READ" [ label = "Setup Cache Read" ]; +"SND_C_HDRS" [ label = "Send Cached Headers" ]; +"CONNECT" [ label = "Try Connect" ]; +"SND_REQ_HDRS" [ label = "Send Req Headers" ]; +"RD_REP_HDRS" [ label = "Read Reply Headers" ]; +"VALID" [ label = "Check Valid" ]; +"SETUP_S_READ" [ label = "Setup Server Read" ]; +"SETUP_CACHE_WRITE" [ label = "Setup Cache Write" ]; +"SETUP_TRANS" [ label = "Setup Transform" ]; +"SETUP_REQ_TRANS" [ label = "Setup Request Transform" ]; +"SETUP_BODY_READ" [ label = "Setup POST/PUT Read" ]; +"TUNNEL" [ label = "Tunnel Response" ]; +"TUNNEL_REQ" [ label = "Tunnel Request Body" ]; +"SND_REP_HDRS" [ label = "Send Reply Headers" ]; + +"API_START" [ label = "API - Start" shape=box ]; +"API_RD_REQ_HDRS" [ label = "API - Read Req Headers" shape=box ]; +"API_DNS" [ label = "API - DNS" shape=box ]; +"API_CACHE_MATCH" [ label = "API - Cache Match" shape=box ]; +"API_CACHE_READ_HDR" [ label = "API - Cache Read Header" shape=box ]; +"API_SND_REQ_HDRS" [ label = "API - Send Req Headers" shape=box ]; +"API_RD_REP_HDRS" [ label = "API - Read Reply Headers" shape=box ]; +"API_SND_REP_HDRS" [ label = "API - Send Reply Headers" shape=box ]; +"API_SHUTDOWN" [ label = "API - Shutdown" shape=box ]; + +"ACCEPT" -> "API_START"; +"API_START" -> 
"RD_REQ_HDRS"; +"RD_REQ_HDRS" -> "API_RD_REQ_HDRS"; +"API_RD_REQ_HDRS" -> "DNS"; +"DNS" -> "API_DNS"; +"API_DNS" -> "C_LOOKUP"; +"C_LOOKUP" -> "API_CACHE_MATCH" [ label = "hit" ]; +"C_LOOKUP" -> "CACHE_LOCK" [ label = "miss" ]; +"CACHE_LOCK" -> "PICK_ADDR"; +"API_CACHE_MATCH" -> "CACHE_MATCH"; +"CACHE_MATCH" -> "CACHE_LOCK" [ label = "no match" ]; +"CACHE_MATCH" -> "API_CACHE_READ_HDR" [ label ="match" ]; +"API_CACHE_READ_HDR" -> "CACHE_FRESH"; +"CACHE_FRESH" -> "SND_C_HDRS" [ label ="fresh" ]; +"CACHE_FRESH" -> "CACHE_LOCK" [ label ="stale" ]; +"SND_C_HDRS" -> "SETUP_C_READ"; +"SETUP_C_READ" -> "SETUP_TRANS"; +"PICK_ADDR" -> "CONNECT"; +"CONNECT" -> "PICK_ADDR" [ label = "fail" ]; +"CONNECT" -> "API_SND_REQ_HDRS" [ label = "success" ]; +"API_SND_REQ_HDRS" -> "SND_REQ_HDRS"; +"SND_REQ_HDRS" -> "SETUP_BODY_READ" [label = "POST/PUT" ]; +"SETUP_BODY_READ" -> "SETUP_REQ_TRANS"; +"SETUP_REQ_TRANS" -> "TUNNEL_REQ"; +"TUNNEL_REQ" -> "RD_REP_HDRS"; +"SND_REQ_HDRS" -> "RD_REP_HDRS" [label = "GET" ]; +"RD_REP_HDRS" -> "API_RD_REP_HDRS" +"API_RD_REP_HDRS" -> "VALID"; +"VALID" -> "PICK_ADDR" [ label = "no" ]; +"VALID" -> "SETUP_S_READ" [ label = "yes" ]; +"SETUP_S_READ" -> "SETUP_TRANS" [ label = "Uncachable" ]; +"SETUP_S_READ" -> "SETUP_CACHE_WRITE" [ label = "Cachable" ]; +"SETUP_CACHE_WRITE" -> "SETUP_TRANS"; +"SETUP_TRANS" -> "API_SND_REP_HDRS"; +"API_SND_REP_HDRS" -> "SND_REP_HDRS"; +"SND_REP_HDRS" -> "TUNNEL"; +"TUNNEL" -> "API_SHUTDOWN" +"API_SHUTDOWN" -> "ACCEPT"; + +} + diff --git a/doc/ext/traffic-server.py b/doc/ext/traffic-server.py new file mode 100644 index 00000000..bae9c6bb --- /dev/null +++ b/doc/ext/traffic-server.py @@ -0,0 +1,238 @@ +# -*- coding: utf-8 -*- +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" + TS Sphinx Directives + ~~~~~~~~~~~~~~~~~~~~~~~~~ + + Sphinx Docs directives for Apache Traffic Server + + :copyright: Copyright 2013 by the Apache Software Foundation + :license: Apache +""" + +from docutils import nodes +from docutils.parsers import rst +from sphinx.domains import Domain, ObjType, std +from sphinx.roles import XRefRole +from sphinx.locale import l_, _ +import sphinx + +class TSConfVar(std.Target): + """ + Description of a traffic server configuration variable. + + Argument is the variable as defined in records.config. + + Descriptive text should follow, indented. + + Then the bulk description (if any) undented. This should be considered equivalent to the Doxygen + short and long description. 
+ """ + + option_spec = { + 'class' : rst.directives.class_option, + 'reloadable' : rst.directives.flag, + 'deprecated' : rst.directives.flag, + 'metric' : rst.directives.unchanged, + } + required_arguments = 3 + optional_arguments = 1 # default is optional, special case if omitted + final_argument_whitespace = True + has_content = True + + def make_field(self, tag, value): + field = nodes.field(); + field.append(nodes.field_name(text=tag)) + body = nodes.field_body() + if (isinstance(value, basestring)): + body.append(sphinx.addnodes.compact_paragraph(text=value)) + else: + body.append(value) + field.append(body) + return field + + # External entry point + def run(self): + env = self.state.document.settings.env + cv_default = None + cv_scope, cv_name, cv_type = self.arguments[0:3] + if (len(self.arguments) > 3): + cv_default = self.arguments[3] + + # First, make a generic desc() node to be the parent. + node = sphinx.addnodes.desc() + node.document = self.state.document + node['objtype'] = 'cv' + + # Next, make a signature node. This creates a permalink and a + # highlighted background when the link is selected. + title = sphinx.addnodes.desc_signature(cv_name, '') + title['ids'].append(nodes.make_id(cv_name)) + title['names'].append(cv_name) + title['first'] = False + title['objtype'] = 'cv' + self.add_name(title) + title.set_class('ts-cv-title') + + # Finally, add a desc_name() node to display the name of the + # configuration variable. + title += sphinx.addnodes.desc_name(cv_name, cv_name) + + node.append(title) + + if ('class' in self.options): + title.set_class(self.options.get('class')) + # This has to be a distinct node before the title. if nested then + # the browser will scroll forward to just past the title. + anchor = nodes.target('', '', names=[cv_name]) + # Second (optional) arg is 'msgNode' - no idea what I should pass for that + # or if it even matters, although I now think it should not be used. 
+ self.state.document.note_explicit_target(title) + env.domaindata['ts']['cv'][cv_name] = env.docname + + fl = nodes.field_list() + fl.append(self.make_field('Scope', cv_scope)) + fl.append(self.make_field('Type', cv_type)) + if (cv_default): + fl.append(self.make_field('Default', cv_default)) + else: + fl.append(self.make_field('Default', sphinx.addnodes.literal_emphasis(text='*NONE*'))) + if ('metric' in self.options): + fl.append(self.make_field('Metric', self.options['metric'])) + if ('reloadable' in self.options): + fl.append(self.make_field('Reloadable', 'Yes')) + if ('deprecated' in self.options): + fl.append(self.make_field('Deprecated', 'Yes')) + + # Get any contained content + nn = nodes.compound(); + self.state.nested_parse(self.content, self.content_offset, nn) + + # Create an index node so that Sphinx adds this config variable to the + # index. nodes.make_id() specifies the link anchor name that is + # implicitly generated by the anchor node above. + indexnode = sphinx.addnodes.index(entries=[]) + indexnode['entries'].append( + ('single', _('%s') % cv_name, nodes.make_id(cv_name), '') + ) + + return [ indexnode, node, fl, nn ] + + +class TSConfVarRef(XRefRole): + def process_link(self, env, ref_node, explicit_title_p, title, target): + return title, target + + +class TrafficServerDomain(Domain): + """ + Apache Traffic Server Documentation. 
+ """ + + name = 'ts' + label = 'Traffic Server' + data_version = 2 + + object_types = { + 'cv': ObjType(l_('configuration variable'), 'cv') + } + + directives = { + 'cv' : TSConfVar + } + + roles = { + 'cv' : TSConfVarRef() + } + + initial_data = { + 'cv' : {} # full name -> docname + } + + dangling_warnings = { + 'cv' : "No definition found for configuration variable '%(target)s'" + } + + def clear_doc(self, docname): + cv_list = self.data['cv'] + for var, doc in cv_list.items(): + if doc == docname: + del cv_list[var] + + def find_doc(self, key, obj_type): + zret = None + + if obj_type == 'cv' : + obj_list = self.data['cv'] + else: + obj_list = None + + if obj_list and key in obj_list: + zret = obj_list[key] + + return zret + + def resolve_xref(self, env, src_doc, builder, obj_type, target, node, cont_node): + dst_doc = self.find_doc(target, obj_type) + if (dst_doc): + return sphinx.util.nodes.make_refnode(builder, src_doc, dst_doc, nodes.make_id(target), cont_node, 'records.config') + + def get_objects(self): + for var, doc in self.data['cv'].iteritems(): + yield var, var, 'cv', doc, var, 1 + +# These types are ignored as missing references for the C++ domain. +EXTERNAL_TYPES = set(( + 'int', 'uint', + 'uint8_t', 'uint16_t', 'uint24_t', 'uint32_t', 'uint64_t', + 'int8_t', 'int16_t', 'int24_t', 'int32_t', 'int64_t', + 'off_t', 'size_t', 'time_t', + 'Event', 'INK_MD5', 'DLL', + )) + +# Clean up specific references that we know will never be defined but are implicitly used by +# other domain directives. Hand convert them to literals. +def xref_cleanup(app, env, node, contnode): + rdomain = node['refdomain'] + rtype = node['reftype'] + rtarget = node['reftarget'] + if 'cpp' == rdomain: + if 'type' == rtype: + # one of the predefined type, or a pointer or reference to it. 
+ if (rtarget in EXTERNAL_TYPES) or (('*' == rtarget[-1] or '&' == rtarget[-1]) and rtarget[:-1] in EXTERNAL_TYPES): + node = nodes.literal() + node += contnode + return node + return; + +def setup(app): + app.add_crossref_type('configfile', 'file', + objname='Configuration file', + indextemplate='pair: %s; Configuration files') + + rst.roles.register_generic_role('arg', nodes.emphasis) + rst.roles.register_generic_role('const', nodes.literal) + + app.add_domain(TrafficServerDomain) + + # Types that we want the C domain to consider built in + for word in EXTERNAL_TYPES: + sphinx.domains.c.CObject.stopwords.add(word) + + app.connect('missing-reference', xref_cleanup) diff --git a/doc/glossary.en.rst b/doc/glossary.en.rst new file mode 100644 index 00000000..94996db6 --- /dev/null +++ b/doc/glossary.en.rst @@ -0,0 +1,101 @@ +.. Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, + software distributed under the License is distributed on an + "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied. See the License for the + specific language governing permissions and limitations + under the License. + +============= +Glossary +============= + +.. glossary:: + :sorted: + + continuation + A callable object that contains state. These are are mechanism used by Traffic Server to implement callbacks and + continued computations. Continued computations are critical to efficient processing of traffic because by avoiding + any blocking operations that wait on external events. 
In any such case a continuation is used so that other + processing can continue until the external event occurs. At that point the continuation is invoked to continue the + suspended processing. This can be considered similar to co-routines. + + session + A single connection from a client to Traffic Server, covering all requests and responses on that connection. A + session starts when the client connection opens, and ends when the connection closes. + + transaction + A client request and response, either from the origin server or from the cache. A transaction begins when Traffic + Server receives a request, and ends when Traffic Server sends the response. + + cache volume + A user defined unit of persistent storage for the cache. Cache volumes are defined in :file:`volume.config`. A + cache volume is by default spread across :term:`cache span`\ s to increase robustness. Each section of a cache + volume on a specific cache span is a :term:`cache stripe`. + + cache stripe + A homogenous persistent store for the cache in a single :term:`cache span`. A stripe always resides + entirely on a single physical device and is treated as an undifferentiated span of bytes. This is the smallest + independent unit of storage. + + cache span + The physical storage described by a single line in :file:`storage.config`. + + cache key + A byte sequence that is a globally unique identifier for an :term:`object ` in the cache. By default + the URL for the object is used. + + cache ID + A 128 bit value used as a fixed sized identifier for an object in the cache. This is computed from the + :term:`cache key` using the `MD5 hashing function `_. + + cache tag + The bottom few bits (12 currently) of the :term:`cache ID`. This is used in the :ref:`cache directory ` + for a preliminary identity check before going to disk. + + cache object + The minimal self contained unit of data in the cache. Cache objects are the stored version of equivalent content + streams from an origin server. 
A single object can have multiple variants called :term:`alternates `. + + alternate + A variant of a :term:`cache object`. This was originally created to handle the `VARY mechanism + `_ but has since been used for additional + purposes. All alternates of an object must be equivalent in some manner, that is they are alternate forms of the + same stream. The most common example is having normal and compressed versions of the stream. + + storage unit + Obsolete term for :term:`cache span`. + + revalidation + Verifying that a currently cached object is still valid. This is usually done using an `If-Modified-Since + `_ request which allows the origin server to + validate the content without resending the content. + + write cursor + The location in a :term:`cache stripe` where new data is written. + + directory segment + A contiguous group of :term:`buckets `. Each :term:`cache stripe` has a set of segments all of + which have the same number of buckets, although the number of buckets per segment can vary between cache stripes. + Segments are administrative in purpose to minimize the size of free list and hash bucket pointers. + + directory bucket + A contiguous fixed sized group of :term:`directory entries `. This is used for hash bucket + maintenance optimization. + + directory entry + An in memory entry that describes a :term:`cache fragment`. + + cache fragment + The unit of storage in the cache. All reads from the cache always read exactly one fragment. Fragments may be + written in groups, but every write is always an integral number of fragments. Each fragment has a corresponding + :term:`directory entry` which describes its location in the cache storage. diff --git a/doc/index.rst b/doc/index.rst new file mode 100644 index 00000000..aed32f00 --- /dev/null +++ b/doc/index.rst @@ -0,0 +1,53 @@ +Apache Traffic Server +********************* + +.. Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. 
See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, + software distributed under the License is distributed on an + "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied. See the License for the + specific language governing permissions and limitations + under the License. + + +Apache Traffic Server™ speeds Internet access, enhances website +performance, and delivers unprecedented web hosting capabilities. + +What Is Apache Traffic Server? +============================== + +Traffic Server is a high-performance web proxy cache that improves +network efficiency and performance by caching frequently-accessed +information at the edge of the network. This brings content physically +closer to end users, while enabling faster delivery and reduced +bandwidth use. Traffic Server is designed to improve content delivery +for enterprises, Internet service providers (ISPs), backbone +providers, and large intranets by maximizing existing and available +bandwidth. + +.. 
toctree:: + :maxdepth: 1 + + admin/index.en + sdk/index.en + reference/configuration/index.en + reference/commands/index.en + reference/plugins/index.en + reference/api/index.en + arch/index.en + glossary.en + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`search` diff --git a/doc/locale/ja/LC_MESSAGES/admin/cluster-howto.en.po b/doc/locale/ja/LC_MESSAGES/admin/cluster-howto.en.po new file mode 100644 index 00000000..a0f0fa8d --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/admin/cluster-howto.en.po @@ -0,0 +1,308 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-11-08 17:57+0900\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../admin/cluster-howto.en.rst:4 +msgid "Traffic Server Cluster" +msgstr "Traffic Server クラスター" + +#: ../../admin/cluster-howto.en.rst:25 +msgid "" +"Traffic Server scales from a single node to multiple nodes that form a " +"cluster allowing you to improve system performance and reliability." 
+msgstr "" +"Traffic Server は単一ノードからシステムのパフォーマンスと信頼性を向上するク" +"ラスターを構成する複数ノードまでスケールします。" + +#: ../../admin/cluster-howto.en.rst:32 +msgid "Understanding Traffic Server Clusters" +msgstr "Traffic Server クラスターの理解" + +#: ../../admin/cluster-howto.en.rst:34 +msgid "" +"A Traffic Server cluster consists of multiple Traffic Server nodes. The " +"nodes in a cluster share configuration information and can form a single " +"logical cache. Traffic Server detects the addition and deletion of nodes in " +"the cluster automatically and can detect when a node is unavailable. " +"Traffic Server uses its own protocol for clustering, which is multicast for " +"node location and heartbeat, but unicast for all data exchange within the " +"cluster. Traffic Server has two clustering modes:" +msgstr "" +"Traffic Server クラスターは複数の Traffic Server ノードから構成されます。ク" +"ラスター内のノードは設定情報を共有し一つの論理的なキャッシュを形成します。" +"Traffic Server はクラスター内のノードの追加と削除を自動的に検知し、ノードが" +"利用不能になったことも検知します。Traffic Server はクラスタリングのために、" +"ノードの位置確認と生存確認にマルチキャストを、クラスター内のすべてのデータ交" +"換をユニキャストで行う独自のプロトコルを使用します。Traffic Server は二つの" +"クラスタリングモードを持ちます。" + +#: ../../admin/cluster-howto.en.rst:42 +msgid "Management-only mode; refer to `Management-Only Clustering`_ below." +msgstr "管理専用モード。`管理専用クラスタリング`_\\ を参照してください。" + +#: ../../admin/cluster-howto.en.rst:43 +msgid "Full-clustering mode; refer to `Full Clustering`_" +msgstr "" +"完全クラスタリングモード。`完全クラスタリング`_\\ を参照してください。" + +#: ../../admin/cluster-howto.en.rst:46 +msgid "Management-Only Clustering" +msgstr "管理専用クラスタリング" + +#: ../../admin/cluster-howto.en.rst:48 +msgid "" +"In management-only clustering mode, Traffic Server cluster nodes share " +"configuration information. You can administer all the nodes at the same " +"time. Traffic Server uses a multicast management protocol to provide a " +"single system image of your Traffic Server cluster. 
Information about " +"cluster membership, configuration, and exceptions is shared across all " +"nodes, and the :program:`traffic_manager` process automatically propagates " +"configuration changes to all the nodes." +msgstr "" +"管理専用クラスタリングモードでは、Traffic Server クラスターノードは設定情報" +"を共有します。すべてのノードを同時に管理できます。Traffic Server は Traffic " +"Server クラスターの単一のシステムイメージを提供するためにマルチキャスト管理" +"プロトコルを使用します。クラスターメンバー、設定、例外に関する情報がすべての" +"ノード間で共有され、:program:`traffic_manager` プロセスは自動的に設定の変更" +"をすべてのノードに伝搬します。" + +#: ../../admin/cluster-howto.en.rst:57 +msgid "Full Clustering" +msgstr "完全クラスタリング" + +#: ../../admin/cluster-howto.en.rst:59 +msgid "" +"In full-clustering mode, as well as sharing configuration information, a " +"Traffic Server cluster distributes its cache across its nodes into a " +"single, virtual object store, rather than replicating the cache node by " +"node. Traffic Server can provide an enormous aggregate cache size and can " +"maximize cache hit rate by storing objects only once across the entire " +"cluster." +msgstr "" +"完全クラスタリングモードでは、設定情報と同様に、Traffic Server クラスターは" +"キャッシュをノードごとに複製するのではなく、ノードをまたいだ単一の仮想オブ" +"ジェクトストアに配信します。Traffic Server はをクラスター全体にたった一度だ" +"けオブジェクトを保存する事で巨大な総キャッシュサイズの提供ととキャッシュヒッ" +"ト率の最大化を可能とします。" + +#: ../../admin/cluster-howto.en.rst:66 +msgid "" +"A fully clustered Traffic Server maps objects to specific nodes in the " +"cluster. When a node receives a request, it checks to see if the request is " +"a hit somewhere in the cluster. If the request is a hit on a different " +"node, the node handling the request obtains the object from the hit node " +"and serves it to the client. Traffic Server uses its own communication " +"protocol to obtain an object from sibling cluster nodes." 
+msgstr "" +"完全にクラスター化された Traffic Server はオブジェクトをクラスター内の特定の" +"ノードに割り当てます。ノードがリクエストを受け取ると、リクエストがクラスター" +"内のどこかでヒットするか確認します。リクエストが他のノードでヒットした場合、" +"ヒットしたノードからオブジェクトを読み取ってクライアントに返すようリクエスト" +"を扱います。Traffic Server は隣のクラスターノードからオブジェクトを読み取る" +"ために独自のコミュニケーションプロトコルを使用します。" + +#: ../../admin/cluster-howto.en.rst:73 +msgid "" +"If a node fails or is shut down and removed, Traffic Server removes " +"references to the missing node on all nodes in the cluster." +msgstr "" +"ノードが動作不能になったりシャットダウンされたり削除されたりすると、Traffic " +"Server はクラスター内のすべてのノード上にある失われたノードへの参照を削除し" +"ます。" + +#: ../../admin/cluster-howto.en.rst:76 +msgid "" +"Full clustering recommends a dedicated network interface for cluster " +"communication to get better performance." +msgstr "" +"完全クラスタリングはより良いパフォーマンスを得るためにクラスターのコミュニ" +"ケーションのための専用のネットワークインターフェースを推奨します。" + +#: ../../admin/cluster-howto.en.rst:80 +msgid "Enabling Clustering Mode" +msgstr "クラスタリングモードの有効化" + +#: ../../admin/cluster-howto.en.rst:82 +msgid "" +"Before you put a node into a cluster, please make sure the following things " +"are in order:" +msgstr "" +"ノードをクラスターに投入する前に、次のことが適切であることを確認してくださ" +"い。" + +#: ../../admin/cluster-howto.en.rst:85 +msgid "You are using the same operation system on all nodes:" +msgstr "すべてのノードで同じオペレーティングシステムを使用していること:" + +#: ../../admin/cluster-howto.en.rst:87 +msgid "Using the same distribution, e.g.: RHEL 5.5" +msgstr "同じディストリビューションを使用していること。例: RHEL 5.5" + +#: ../../admin/cluster-howto.en.rst:88 +msgid "Have same kernel, e.g.: 2.6.18-194.17.1.el5" +msgstr "同じカーネルを使っていること。例: 2.6.18-194.17.1.el5" + +#: ../../admin/cluster-howto.en.rst:89 +msgid "The same architecture, e.g.: ``x86_64``" +msgstr "同じアーキテクチャであること。例: ``x86_64``" + +#: ../../admin/cluster-howto.en.rst:91 +msgid "You have the same version of Traffic Server installed" +msgstr "同じバージョンの Traffic Server がインストールされていること" + +#: ../../admin/cluster-howto.en.rst:92 +msgid "The same hardware" +msgstr "同じハードウェアであること" + +#: ../../admin/cluster-howto.en.rst:93 +msgid "On the same 
switch or same VLAN." +msgstr "同じスイッチもしくは同じ VLAN 上にあること" + +#: ../../admin/cluster-howto.en.rst:95 +msgid "" +"Traffic Server does not apply the clustering mode change to all the nodes " +"in the cluster. You must change the clustering mode on each node " +"individually. You may following these instructions:" +msgstr "" +"Traffic Server はクラスタリングモードの変更をクラスター内のすべてのノードに" +"適用しません。クラスタリングモードは各ノードで個別に変更しなければなりませ" +"ん。次の指示に従ってください。" + +#: ../../admin/cluster-howto.en.rst:99 +msgid "" +"setup the same cluster name, with :ts:cv:`proxy.config.proxy_name`, e.g. " +"MyCluster." +msgstr "" +":ts:cv:`proxy.config.proxy_name` に同じクラスター名をセットアップする。例 " +"MyCluster" + +#: ../../admin/cluster-howto.en.rst:101 +msgid "" +"Set :ts:cv:`proxy.local.cluster.type` to ``1``, to enable cluster mode." +msgstr "" +"クラスターモードを有効化するために :ts:cv:`proxy.local.cluster.type`を " +"``1`` に設定する。" + +#: ../../admin/cluster-howto.en.rst:103 +msgid "" +"Setup a :ts:cv:`proxy.config.cluster.ethernet_interface`, e.g.: ``eth0``. " +"This should be replaced by your real interface; we recommends a dedicated " +"interface here. Refer to :ts:cv:`proxy.local.cluster.type` for a full " +"description." +msgstr "" +":ts:cv:`proxy.config.cluster.ethernet_interface` を設定する。例: ``eth0`` 。" +"これは実際のインターフェース名に置き換えてください。これは専用のインター" +"フェースにすることをお勧めします。完全な説明については :ts:cv:`proxy.local." +"cluster.type` を参照してください。" + +#: ../../admin/cluster-howto.en.rst:107 +msgid "enable: ::" +msgstr "有効化 ::" + +#: ../../admin/cluster-howto.en.rst:111 +msgid "restart: ::" +msgstr "リスタート ::" + +#: ../../admin/cluster-howto.en.rst:115 +msgid "" +"The :program:`traffic_server` and :program:`traffic_manager` processes will " +"need to restart after the change of :ts:cv:`proxy.local.cluster.type` and :" +"ts:cv:`proxy.config.cluster.ethernet_interface` have taken place." +msgstr "" +":program:`traffic_server` と :program:`traffic_manager` のプロセスは :ts:cv:" +"`proxy.local.cluster.type` と :ts:cv:`proxy.config.cluster." 
+"ethernet_interface` の変更が行われた後にリスタートする必要があります。" + +#: ../../admin/cluster-howto.en.rst:119 +msgid "" +"Traffic Server will join the cluster in about 10 seconds, and you can run :" +"option:`traffic_line -r` `proxy.process.cluster.nodes` to check the hosts " +"in the cluster, or check out the :file:`cluster.config` in the " +"configuration directory." +msgstr "" +"Traffic Server は約 10 秒でクラスターに加わり、クラスター内のホストを確認し" +"たり、設定ディレクトリ内の :file:`cluster.config` を確認するために :option:" +"`traffic_line -r` `proxy.process.cluster.nodes` を実行することができます。" + +#: ../../admin/cluster-howto.en.rst:125 +msgid "" +"After a successful join of the cluster, all changes of global " +"configurations on any node, will take effect on **all** nodes." +msgstr "" +"クラスターへの参加が成功した後、どのノード上であってもグローバル設定のあらゆ" +"る変更は\\ *すべて*\\ のノードに影響します。" + +#: ../../admin/cluster-howto.en.rst:129 +msgid "Deleting Nodes from a Cluster" +msgstr "クラスターからのノードの削除" + +#: ../../admin/cluster-howto.en.rst:131 +msgid "" +"To delete a node from the Traffic Server cluster, just roll back :ts:cv:" +"`proxy.local.cluster.type` to the default value 3 and reload." +msgstr "" +"Traffic Server クラスターからノードを削除するためには、単に :ts:cv:`proxy." 
+"local.cluster.type` をデフォルト値の 3 に戻して再読み込みを行ってください。" + +#: ../../admin/cluster-howto.en.rst:135 +msgid "Performance tweak for busy Cluster" +msgstr "多忙なクラスターのパフォーマンス調整" + +#: ../../admin/cluster-howto.en.rst:137 +msgid "" +"Starting from v3.2.0, Apache Traffic Server can handle multiple internal " +"cluster connections, and we can tweak for the Cluster threads and each of " +"the thread will keep one connection to all of the cluster machines:" +msgstr "" +"v3.2.0 から、Apache Traffic Server は複数の内部クラスター接続を扱えるように" +"なり、クラスタースレッドと各スレッドが全クラスターマシンに対して一つの接続を" +"維持するよう調整できるようになりました。" + +#: ../../admin/cluster-howto.en.rst:141 +msgid "Increasing Cluster threads:" +msgstr "クラスタースレッドの増加:" + +#: ../../admin/cluster-howto.en.rst:143 +msgid "" +"In the cluster env, the current performance of the Cluster threads will " +"consume the same cpu usage as net threads, so you may adapt the ET_NET & " +"ET_CLUSTER at about 1:1. For example, on a 24 cores system, set ET_NET " +"threads & ET_CLUSTER threads to 10, by setting :ts:cv:`proxy.config.cluster." +"threads` to ``10``." +msgstr "" +"クラスター環境では、現在のクラスタースレッドのパフォーマンスはネットスレッド" +"と同じだけ CPU を使用するので、ET_NET と ET_CLUSTER を約 1:1 に調整してもよ" +"いでしょう。例えば、24 コアのシステム上では、:ts:cv:`proxy.config.cluster." +"threads` を ``10`` に設定することで ET_NET スレッドと ET_CLUSTER スレッドを " +"10 に設定します。" + +#: ../../admin/cluster-howto.en.rst:153 +msgid "" +"with these tweaks, we can archive about 10gbps traffic for the internal " +"cluster transfer speed." +msgstr "" +"これらの調整により、内部クラスター転送速度 10gbps を達成することができます。" diff --git a/doc/locale/ja/LC_MESSAGES/admin/configuring-cache.en.po b/doc/locale/ja/LC_MESSAGES/admin/configuring-cache.en.po new file mode 100644 index 00000000..9aadb006 --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/admin/configuring-cache.en.po @@ -0,0 +1,593 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-11-08 17:57+0900\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../admin/configuring-cache.en.rst:4 +msgid "Configuring the Cache" +msgstr "" + +#: ../../admin/configuring-cache.en.rst:23 +msgid "" +"The Traffic Server cache consists of a high-speed object database called " +"the **object store** that indexes objects according to URLs and their " +"associated headers." +msgstr "" + +#: ../../admin/configuring-cache.en.rst:31 +msgid "The Traffic Server Cache" +msgstr "" + +#: ../../admin/configuring-cache.en.rst:33 +msgid "" +"The Traffic Server cache consists of a high-speed object database called " +"the **object store**. The object store indexes objects according to URLs " +"and associated headers. This enables Traffic Server to store, retrieve, and " +"serve not only web pages, but also parts of web pages - which provides " +"optimum bandwidth savings. 
Using sophisticated object management, the " +"object store can cache alternate versions of the same object (versions may " +"differ because of dissimilar language or encoding types). It can also " +"efficiently store very small and very large documents, thereby minimizing " +"wasted space. When the cache is full, Traffic Server removes stale data to " +"ensure the most requested objects are kept readily available and fresh." +msgstr "" + +#: ../../admin/configuring-cache.en.rst:45 +msgid "" +"Traffic Server is designed to tolerate total disk failures on any of the " +"cache disks. If the disk fails completely, then Traffic Server marks the " +"entire disk as corrupt and continues using the remaining disks. An alarm is " +"then created to indicate which disk failed. If all of the cache disks fail, " +"then Traffic Server goes into proxy-only mode." +msgstr "" + +#: ../../admin/configuring-cache.en.rst:51 +msgid "You can perform the following cache configuration tasks:" +msgstr "" + +#: ../../admin/configuring-cache.en.rst:53 +msgid "" +"Change the total amount of disk space allocated to the cache: refer to " +"`Changing Cache Capacity`_." +msgstr "" + +#: ../../admin/configuring-cache.en.rst:55 +msgid "" +"Partition the cache by reserving cache disk space for specific protocols " +"and origin servers/domains: refer to `Partitioning the Cache`_." +msgstr "" + +#: ../../admin/configuring-cache.en.rst:57 +msgid "Delete all data in the cache: refer to `Clearing the Cache`_." +msgstr "" + +#: ../../admin/configuring-cache.en.rst:64 +msgid "The RAM Cache" +msgstr "" + +#: ../../admin/configuring-cache.en.rst:66 +msgid "" +"Traffic Server maintains a small RAM cache of extremely popular objects. " +"This RAM cache serves the most popular objects as quickly as possible and " +"reduces load on disks, especially during temporary traffic peaks. You can " +"configure the RAM cache size to suit your needs, as described in :ref:" +"`changing-the-size-of-the-ram-cache` below." 
+msgstr "" + +#: ../../admin/configuring-cache.en.rst:79 +msgid "" +"Both the **LRU** and **CLFUS** RAM caches support a configuration to " +"increase scan resistance. In a typical **LRU**, if you request all possible " +"objects in sequence, you will effectively churn the cache on every request. " +"The option :ts:cv:`proxy.config.cache.ram_cache.use_seen_filter` can be set " +"to add some resistance against this problem." +msgstr "" + +#: ../../admin/configuring-cache.en.rst:85 +msgid "" +"In addition, **CLFUS** also supports compressing in the RAM cache itself. " +"This can be useful for content which is not compressed by itself (e.g. " +"images). This should not be confused with *Content-Encoding: gzip*, this " +"feature is only thereto save space internally in the RAM cache itself. As " +"such, it is completely transparent to the User-Agent. The RAM cache " +"compression is enabled with the option :ts:cv:`proxy.config.cache.ram_cache." +"compress`. The default is 0, which means no compression. Other possible " +"values are 1 for **fastlz**, 2 for **libz** and 3 for **liblzma**." +msgstr "" + +#: ../../admin/configuring-cache.en.rst:99 +msgid "Changing the Size of the RAM Cache" +msgstr "" + +#: ../../admin/configuring-cache.en.rst:101 +msgid "" +"Traffic Server provides a dedicated RAM cache for fast retrieval of popular " +"small objects. The default RAM cache size is automatically calculated based " +"on the number and size of the cache partitions you have configured. If " +"you've partitioned your cache according to protocol and/or hosts, then the " +"size of the RAM cache for each partition is proportional to the size of " +"that partition." +msgstr "" + +#: ../../admin/configuring-cache.en.rst:108 +msgid "" +"You can increase the RAM cache size for better cache hit performance. 
" +"However, if you increase the size of the RAM cache and observe a decrease " +"in performance (such as increased latencies), then it's possible that the " +"operating system requires more memory for network resources. In such " +"instances, you should return the RAM cache size to its previous value." +msgstr "" + +#: ../../admin/configuring-cache.en.rst:115 +msgid "To change the RAM cache size:" +msgstr "" + +#: ../../admin/configuring-cache.en.rst:117 +#: ../../admin/configuring-cache.en.rst:142 +#: ../../admin/configuring-cache.en.rst:155 +msgid "Stop Traffic Server." +msgstr "" + +#: ../../admin/configuring-cache.en.rst:118 +msgid "" +"Set the variable :ts:cv:`proxy.config.cache.ram_cache.size` to specify the " +"size of the RAM cache. The default value of -1 means that the RAM cache is " +"automatically sized at approximately 1MB per gigabyte of disk." +msgstr "" + +#: ../../admin/configuring-cache.en.rst:122 +msgid "" +"Restart Traffic Server. If you increase the RAM cache to a size of 1GB or " +"more, then restart with the :program:`trafficserver` command (refer to :ref:" +"`start-traffic-server`)." +msgstr "" + +#: ../../admin/configuring-cache.en.rst:129 +msgid "Changing Cache Capacity" +msgstr "" + +#: ../../admin/configuring-cache.en.rst:131 +msgid "" +"You can increase or reduce the total amount of disk space allocated to the " +"cache without clearing the content. To check the size of the cache (in " +"bytes), enter the command :option:`traffic_line -r` ``proxy.process.cache." +"bytes_total``." +msgstr "" + +#: ../../admin/configuring-cache.en.rst:136 +msgid "Increasing Cache Capacity" +msgstr "" + +#: ../../admin/configuring-cache.en.rst:138 +msgid "" +"To increase the total amount of disk space allocated to the cache on " +"existing disks or to add new disks to a Traffic Server node, follow the " +"steps below:" +msgstr "" + +#: ../../admin/configuring-cache.en.rst:143 +msgid "Add hardware, if necessary." 
+msgstr "" + +#: ../../admin/configuring-cache.en.rst:144 +msgid "" +"Edit :file:`storage.config` to increase the amount of disk space allocated " +"to the cache on existing disks or describe the new hardware you are adding." +msgstr "" + +#: ../../admin/configuring-cache.en.rst:146 +#: ../../admin/configuring-cache.en.rst:159 +#: ../../admin/configuring-cache.en.rst:188 +msgid "Restart Traffic Server." +msgstr "" + +#: ../../admin/configuring-cache.en.rst:149 +msgid "Reducing Cache Capacity" +msgstr "" + +#: ../../admin/configuring-cache.en.rst:151 +msgid "" +"To reduce the total amount of disk space allocated to the cache on an " +"existing disk or to remove disks from a Traffic Server node, follow the " +"steps below:" +msgstr "" + +#: ../../admin/configuring-cache.en.rst:156 +msgid "Remove hardware, if necessary." +msgstr "" + +#: ../../admin/configuring-cache.en.rst:157 +msgid "" +"Edit :file:`storage.config` to reduce the amount of disk space allocated to " +"the cache on existing disks or delete the reference to the hardware you're " +"removing." +msgstr "" + +#: ../../admin/configuring-cache.en.rst:161 +msgid "" +"In :file:`storage.config`, a formatted or raw disk must be at least 128 MB." +msgstr "" + +#: ../../admin/configuring-cache.en.rst:166 +msgid "Partitioning the Cache" +msgstr "" + +#: ../../admin/configuring-cache.en.rst:168 +msgid "" +"You can manage your cache space more efficiently and restrict disk usage by " +"creating cache volumes with different sizes for specific protocols. You can " +"further configure these volumes to store data from specific origin servers " +"and/or domains. The volume configuration must be the same on all nodes in " +"a :ref:`cluster `." 
+msgstr "" + +#: ../../admin/configuring-cache.en.rst:175 +msgid "Creating Cache Partitions for Specific Protocols" +msgstr "" + +#: ../../admin/configuring-cache.en.rst:177 +msgid "" +"You can create separate volumes for your cache that vary in size to store " +"content according to protocol. This ensures that a certain amount of disk " +"space is always available for a particular protocol. Traffic Server " +"currently supports the **http** partition type for HTTP objects." +msgstr "" + +#: ../../admin/configuring-cache.en.rst:184 +msgid "To partition the cache according to protocol:" +msgstr "" + +#: ../../admin/configuring-cache.en.rst:186 +msgid "" +"Enter a line in the :file:`volume.config` file for each volume you want to " +"create" +msgstr "" + +#: ../../admin/configuring-cache.en.rst:191 +msgid "Making Changes to Partition Sizes and Protocols" +msgstr "" + +#: ../../admin/configuring-cache.en.rst:193 +msgid "" +"After you've configured your cache volumes based on protocol, you can make " +"changes to the configuration at any time. Before making changes, note the " +"following:" +msgstr "" + +#: ../../admin/configuring-cache.en.rst:197 +msgid "" +"You must stop Traffic Server before you change the cache volume size and " +"protocol assignment." +msgstr "" + +#: ../../admin/configuring-cache.en.rst:199 +msgid "" +"When you increase the size of a volume, the contents of the volume are " +"*not* deleted. However, when you reduce the size of a volume, the contents " +"of the volume *are* deleted." +msgstr "" + +#: ../../admin/configuring-cache.en.rst:202 +msgid "" +"When you change the volume number, the volume is deleted and then " +"recreated, even if the size and protocol type remain the same." +msgstr "" + +#: ../../admin/configuring-cache.en.rst:204 +msgid "" +"When you add new disks to your Traffic Server node, volume sizes specified " +"in percentages will increase proportionately." 
+msgstr "" + +#: ../../admin/configuring-cache.en.rst:206 +msgid "" +"A lot of changes to volume sizes might result in disk fragmentation, which " +"affects performance and hit rate. You should clear the cache before making " +"many changes to cache volume sizes (refer to `Clearing the Cache`_)." +msgstr "" + +#: ../../admin/configuring-cache.en.rst:211 +msgid "Partitioning the Cache According to Origin Server or Domain" +msgstr "" + +#: ../../admin/configuring-cache.en.rst:213 +msgid "" +"After you have partitioned the cache according to size and protocol, you " +"can assign the volumes you created to specific origin servers and/or " +"domains. You can assign a volumes to a single origin server or to multiple " +"origin servers. However, if a volumes is assigned to multiple origin " +"servers, then there is no guarantee on the space available in the volumes " +"for each origin server. Content is stored in the volumes according to " +"popularity. In addition to assigning volumes to specific origin servers and " +"domains, you must assign a generic volume to store content from all origin " +"servers and domains that are not listed. This generic volume is also used " +"if the partitions for a particular origin server or domain become corrupt. " +"If you do not assign a generic volume, then Traffic Server will run in " +"proxy-only mode." +msgstr "" + +#: ../../admin/configuring-cache.en.rst:228 +msgid "" +"You do *not* need to stop Traffic Server before you assign volumes to " +"particular hosts or domains. However, this type of configuration is time-" +"consuming and can cause a spike in memory usage. Therefore, it's best to " +"configure partition assignment during periods of low traffic." 
+msgstr "" + +#: ../../admin/configuring-cache.en.rst:234 +msgid "To partition the cache according to hostname and domain:" +msgstr "" + +#: ../../admin/configuring-cache.en.rst:236 +msgid "" +"Configure the cache volumes according to size and protocol, as described in " +"`Creating Cache Partitions for Specific Protocols`_." +msgstr "" + +#: ../../admin/configuring-cache.en.rst:238 +msgid "" +"Create a separate volume based on protocol for each host and domain, as " +"well as an additional generic partition to use for content that does not " +"belong to these origin servers or domains. The volumes do not need to be " +"the same size." +msgstr "" + +#: ../../admin/configuring-cache.en.rst:242 +msgid "" +"Enter a line in the :file:`hosting.config` file to allocate the volume(s) " +"used for each origin server and/or domain" +msgstr "" + +#: ../../admin/configuring-cache.en.rst:244 +msgid "" +"Assign a generic volume to use for content that does not belong to any of " +"the origin servers or domains listed in the file. If all volumes for a " +"particular origin server become corrupt, then Traffic Server will also use " +"the generic volume to store content for that origin server as per :file:" +"`hosting.config`." +msgstr "" + +#: ../../admin/configuring-cache.en.rst:249 +#: ../../admin/configuring-cache.en.rst:262 +msgid "" +"Run the command :option:`traffic_line -x` to apply the configuration " +"changes." +msgstr "" + +#: ../../admin/configuring-cache.en.rst:253 +msgid "Configuring the Cache Object Size Limit" +msgstr "" + +#: ../../admin/configuring-cache.en.rst:255 +msgid "" +"By default, Traffic Server allows objects of any size to be cached. You can " +"change the default behavior and specify a size limit for objects in the " +"cache via the steps below:" +msgstr "" + +#: ../../admin/configuring-cache.en.rst:259 +msgid "" +"Set :ts:cv:`proxy.config.cache.max_doc_size` to specify the maximum size " +"allowed for objects in the cache in bytes. 
``0`` (zero) if you do not want " +"a size limit." +msgstr "" + +#: ../../admin/configuring-cache.en.rst:268 +msgid "Clearing the Cache" +msgstr "" + +#: ../../admin/configuring-cache.en.rst:270 +msgid "" +"When you clear the cache, you remove all data from the entire cache - " +"including data in the host database. You should clear the cache before " +"performing certain cache configuration tasks, such as partitioning. You " +"cannot clear the cache when Traffic Server is running." +msgstr "" + +#: ../../admin/configuring-cache.en.rst:275 +msgid "To clear the cache:" +msgstr "" + +#: ../../admin/configuring-cache.en.rst:277 +msgid "" +"Stop Traffic Server (refer to :ref:`Stopping Traffic Server `)" +msgstr "" + +#: ../../admin/configuring-cache.en.rst:278 +msgid "Enter the following command to clear the cache: ::" +msgstr "" + +#: ../../admin/configuring-cache.en.rst:282 +msgid "" +"The ``clear`` command deletes all data in the object store and the host " +"database. Traffic Server does not prompt you to confirm the deletion." +msgstr "" + +#: ../../admin/configuring-cache.en.rst:286 +msgid "" +"Restart Traffic Server (refer to :ref:`Starting Traffic Server `)." +msgstr "" + +#: ../../admin/configuring-cache.en.rst:289 +msgid "Removing an Object From the Cache" +msgstr "" + +#: ../../admin/configuring-cache.en.rst:291 +msgid "" +"Traffic Server accepts the custom HTTP request method ``PURGE`` when " +"removing a specific object from cache. If the object is found in the cache " +"and is successfully removed, then Traffic Server responds with a ``200 OK`` " +"HTTP message; otherwise, a ``404 File Not Found`` message is returned." +msgstr "" + +#: ../../admin/configuring-cache.en.rst:297 +msgid "" +"In the following example, Traffic Server is running on the domain ``example." +"com`` and you want to remove the image ``remove_me.jpg`` from cache. 
" +"Because by default we do not permit ``PURGE`` requests from any other IP, " +"we connect to the daemon via localhost: ::" +msgstr "" + +#: ../../admin/configuring-cache.en.rst:316 +msgid "" +"The next time Traffic Server receives a request for the removed object, it " +"will contact the origin server to retrieve it (i.e., it has been purged " +"from the Traffic Server cache)." +msgstr "" + +#: ../../admin/configuring-cache.en.rst:320 +msgid "" +"Note: The procedure above only removes an object from a *specific* Traffic " +"Server cache. Users may still see the old (removed) content if it was " +"cached by intermediary caches or by the end-users' web browser." +msgstr "" + +#: ../../admin/configuring-cache.en.rst:327 +msgid "Inspecting the Cache" +msgstr "" + +#: ../../admin/configuring-cache.en.rst:336 +msgid "Accessing the Cache Inspector Utility" +msgstr "" + +#: ../../admin/configuring-cache.en.rst:338 +msgid "To access the Cache Inspector utility, follow the steps below:" +msgstr "" + +#: ../../admin/configuring-cache.en.rst:340 +msgid "Set :ts:cv:`proxy.config.http_ui_enabled` to ``1``." +msgstr "" + +#: ../../admin/configuring-cache.en.rst:341 +msgid "" +"To access the cache inspector in reverse proxy mode, you must add a remap " +"rule to :file:`remap.config` to expose the URL. This should be restricted " +"to a limited set of hosts using the ``@src_ip`` option. To restrict access " +"to the network 172.28.56.0/24, use ::" +msgstr "" + +#: ../../admin/configuring-cache.en.rst:348 +msgid "" +"From the Traffic Server ``bin`` directory, enter the following command to " +"re-read the configuration file: ``traffic_line -x``" +msgstr "" + +#: ../../admin/configuring-cache.en.rst:350 +msgid "" +"Open your web browser and configure it to use your Traffic Server as a " +"proxy server. Type the following URL::" +msgstr "" + +#: ../../admin/configuring-cache.en.rst:355 +msgid "The Cache page opens." 
+msgstr "" + +#: ../../admin/configuring-cache.en.rst:358 +msgid "Using the Cache Page" +msgstr "" + +#: ../../admin/configuring-cache.en.rst:360 +msgid "" +"The **Cache page** provides several options that enable you to view and " +"delete the contents of your cache:" +msgstr "" + +#: ../../admin/configuring-cache.en.rst:363 +msgid "" +"Click **Lookup url** to search for a particular URL in the cache. When " +"Traffic Server finds the URL in the cache, it displays details about the " +"object that corresponds to the URL (such as the header length and the " +"number of alternates). From the display page, you can delete the URL from " +"the cache." +msgstr "" + +#: ../../admin/configuring-cache.en.rst:368 +msgid "" +"Click **Delete url** to delete a particular URL or list of URLs from the " +"cache. Traffic Server indicates if a delete is successful." +msgstr "" + +#: ../../admin/configuring-cache.en.rst:370 +msgid "" +"Click **Regex lookup** to search for URLs that match one or more regular " +"expressions. From the display page, you can delete the URLs listed. For " +"example, enter the following to search for all URLs that end in html and " +"are prefixed with ``http://www.dianes.com``: ``http://www.dianes.com/.*\\." +"html$``" +msgstr "" + +#: ../../admin/configuring-cache.en.rst:375 +msgid "" +"Click **Regex delete** to delete all URLs that match a specified regular " +"expression. For example, enter the following to delete all HTTP URLs that " +"end in ``html``: ``http://.*\\.html$``" +msgstr "" + +#: ../../admin/configuring-cache.en.rst:378 +msgid "" +"Click **Regex invalidate** to invalidate URLs that match a specified " +"regular expression. When you invalidate a URL, Traffic Server marks the " +"object that corresponds to the URL as stale in the cache. Traffic Server " +"then contacts the origin server to check if the object is still fresh " +"(revalidates) before serving it from the cache." 
+msgstr "" + +#: ../../admin/configuring-cache.en.rst:386 +msgid "" +"Only one administrator should delete and invalidate cache entries from the " +"Cache page at any point in time. Changes made by multiple administrators at " +"the same time can lead to unpredictable results." +msgstr "" + +#: ../../admin/configuring-cache.en.rst:58 +msgid "" +"Override cache directives for a requested domain name, regex on a url, " +"hostname or ip, with extra filters for time, port, method of the request " +"(and more). ATS can be configured to never cache; always cache; ignore no-" +"cache directives, etc. These are configured in :file:`cache.config`." +msgstr "" + +#: ../../admin/configuring-cache.en.rst:72 +msgid "" +"The RAM cache supports two cache eviction algorithms, a regular **LRU** " +"(*Least Recently Used*) and the more advanced **CLFUS** (*Clocked Least " +"Frequently Used by Size*, which balances recentness, frequency and size to " +"maximize hit rate -- similar to a most frequently used algorithm). The " +"default is to use **CLFUS**, and this is controlled via :ts:cv:`proxy." +"config.cache.ram_cache.algorithm`." +msgstr "" + +#: ../../admin/configuring-cache.en.rst:329 +msgid "" +"Traffic Server provides a Cache Inspector utility that enables you to view, " +"delete, and invalidate URLs in the cache (HTTP only). The Cache Inspector " +"utility is a powerful tool that's capable of deleting *all* the objects in " +"your cache; therefore, make sure that only authorized administrators are " +"allowed to access this utility, see :ref:`controlling-client-access-to-" +"cache` and the ``@src_ip`` option in :file:`remap.config`." 
+msgstr "" diff --git a/doc/locale/ja/LC_MESSAGES/admin/configuring-traffic-server.en.po b/doc/locale/ja/LC_MESSAGES/admin/configuring-traffic-server.en.po new file mode 100644 index 00000000..1d0c8ccc --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/admin/configuring-traffic-server.en.po @@ -0,0 +1,125 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-11-08 17:57+0900\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../admin/configuring-traffic-server.en.rst:5 +msgid "Configuring Traffic Server" +msgstr "Traffic Server の設定" + +#: ../../admin/configuring-traffic-server.en.rst:24 +msgid "Traffic Server provides several options for configuring the system." 
+msgstr "" +"Traffic Server はシステムを設定するためのいくつかの方法を提供しています。" + +#: ../../admin/configuring-traffic-server.en.rst:32 +msgid "Configure Traffic Server Using Traffic Line" +msgstr "Traffic Line を使用して Traffic Server を設定する" + +#: ../../admin/configuring-traffic-server.en.rst:34 +msgid "" +"Traffic Line enables you to quickly and easily change your Traffic Server " +"configuration via command-line interface. Alternatively, you can also use :" +"program:`traffic_shell` to configure Traffic Server." +msgstr "" +"Traffic Line はコマンドラインインターフェースで手早く簡単に Traffic Server " +"の設定を変更することを可能にします。あるいは Traffic Server を設定するため" +"に :program:`traffic_shell` を使用することもできます。" + +#: ../../admin/configuring-traffic-server.en.rst:39 +msgid "View Configuration Options in Traffic Line" +msgstr "Traffic Line で設定項目を見る" + +#: ../../admin/configuring-traffic-server.en.rst:41 +msgid "To view a configuration setting, enter the following command::" +msgstr "設定内容を見るには次のコマンドを入力してください" + +#: ../../admin/configuring-traffic-server.en.rst:45 +msgid "" +"where ``var`` is the variable associated with the configuration option. For " +"a list of variables, refer to :ref:`configuration-variables`." +msgstr "" +"``var`` の部分は設定項目に対応する変数です。変数の一覧については :ref:" +"`configuration-variables` を参照してください。" + +#: ../../admin/configuring-traffic-server.en.rst:49 +msgid "Change Configuration Options in Traffic Line" +msgstr "Traffic Line で設定項目を変更する" + +#: ../../admin/configuring-traffic-server.en.rst:51 +msgid "" +"To change the value of a configuration setting, enter the following " +"command::" +msgstr "設定内容の値を変更するには次のコマンドを入力してください。" + +#: ../../admin/configuring-traffic-server.en.rst:56 +msgid "" +"where ``var`` is the variable associated with the configuration option and " +"``value`` is the value you want to use. For a list of the variables, see :" +"ref:`configuration-variables`." 
+msgstr "" +"``var`` の部分は設定項目に対応する変数、``value`` は設定したい値です。変数の" +"一覧については :ref:`configuration-variables` を見てください。" + +#: ../../admin/configuring-traffic-server.en.rst:61 +msgid "Configure Traffic Server Using Configuration Files" +msgstr "設定ファイルを使用して Traffic Server を設定する" + +#: ../../admin/configuring-traffic-server.en.rst:63 +msgid "" +"As an alternative to using Traffic Line or Traffic Shell, you can change " +"Traffic Server configuration options by manually editing specific variables " +"in the :file:`records.config` file. After modifying the :file:`records." +"config` file, Traffic Server must reread the configuration files: enter " +"the Traffic Line command :option:`traffic_line -x`. You may need to restart " +"Traffic Server to apply some of the configuration changes." +msgstr "" +"Traffic Line や Traffic Shell を使用する代わりに、:file:`records.config` " +"ファイルの具体的な変数を手動で編集することで Traffic Server の設定項目を変更" +"することができます。:file:`records.config` ファイルの修正後、Traffic Server " +"は必ず設定ファイルを再読み込みしなくてはなりません。Traffic Line コマンド :" +"option:`traffic_line -x` を入力してください。いくつかの設定の変更を適用する" +"には Traffic Server を再起動する必要があるかもしれません。" + +#: ../../admin/configuring-traffic-server.en.rst:71 +msgid "The following is a sample portion of the :file:`records.config` file:" +msgstr "以下は :file:`records.config` ファイルの部分的なサンプルです" + +#: ../../admin/configuring-traffic-server.en.rst:78 +msgid "Sample records.config file" +msgstr "records.config ファイルのサンプル" + +#: ../../admin/configuring-traffic-server.en.rst:80 +msgid "" +"In addition to the :file:`records.config` file, Traffic Server provides " +"other configuration files that are used to configure specific features. You " +"can manually edit all configuration files as described in :ref:" +"`configuration-file-reference`." 
+msgstr "" +":file:`records.config` ファイルに加え、Traffic Server は特定の機能を設定する" +"ために使用されるその他の複数の設定ファイルを提供します。:ref:`configuration-" +"file-reference` で説明されているように、すべての設定ファイルを手動で編集する" +"ことができます。" diff --git a/doc/locale/ja/LC_MESSAGES/admin/event-logging-formats.en.po b/doc/locale/ja/LC_MESSAGES/admin/event-logging-formats.en.po new file mode 100644 index 00000000..12d23655 --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/admin/event-logging-formats.en.po @@ -0,0 +1,1022 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-11-08 17:57+0900\n" +"PO-Revision-Date: 2014-01-12 17:37+0900\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../admin/event-logging-formats.en.rst:4 +msgid "Event Logging Formats" +msgstr "イベントロギングフォーマット" + +#: ../../admin/event-logging-formats.en.rst:23 +msgid "" +"This document provides a reference for all the different logging formats " +"Traffic Server supports. 
Rather than just reading about those formats, you " +"may also want to try our `online event log builder `_ for an interactive way of building and " +"understanding log formats." +msgstr "" +"このドキュメントは Traffic Server がサポートしている全ての異なるロギング" +"フォーマットについてのリファレンスを提供します。これらのフォーマットについて" +"単に読むよりも、ログフォーマットのビルドと理解をインタラクティブに行うため" +"に `online event log builder `_ を試す方がいいでしょう。" + +#: ../../admin/event-logging-formats.en.rst:36 +msgid "Custom Logging Fields" +msgstr "カスタムロギングフィールド" + +#: ../../admin/event-logging-formats.en.rst:38 +msgid "The following list describes Traffic Server custom logging fields." +msgstr "次のリストは Traffic Server のカスタムロギングフィールドの説明です。" + +#: ../../admin/event-logging-formats.en.rst:43 +msgid "``{HTTP header field name}cqh``" +msgstr "``{HTTP header field name}cqh``" + +#: ../../admin/event-logging-formats.en.rst:41 +msgid "" +"Logs the information in the requested field of the client request HTTP " +"header. For example, ``%<{Accept-Language}cqh>`` logs the ``Accept-Language:" +"`` field in client request headers." +msgstr "" +"クライアントのリクエストの HTTP ヘッダーの指定されたフィールドの情報のログで" +"す。例えば ``%<{Accept-Language}cqh>`` はクライアントリクエストヘッダーの " +"``Accept-Language:`` フィールドを記録します。" + +#: ../../admin/event-logging-formats.en.rst:48 +msgid "``{HTTP header field name}pqh``" +msgstr "``{HTTP header field name}pqh``" + +#: ../../admin/event-logging-formats.en.rst:46 +msgid "" +"Logs the information in the requested field of the proxy request HTTP " +"header. For example, ``%<{Authorization}pqh>`` logs the ``Authorization:`` " +"field in proxy request headers." +msgstr "" +"プロキシーのリクエストの HTTP ヘッダーの指定されたフィールドの情報を記録しま" +"す。例えば ``%<{Authorization}pqh>`` はプロキシーリクエストヘッダー内の " +"``Authorization:`` フィールドを記録します。" + +#: ../../admin/event-logging-formats.en.rst:53 +msgid "``{HTTP header field name}psh``" +msgstr "``{HTTP header field name}psh``" + +#: ../../admin/event-logging-formats.en.rst:51 +msgid "" +"Logs the information in the requested field of the proxy response HTTP " +"header. 
For example, ``%<{Retry-After}psh>`` logs the ``Retry-After:`` " +"field in proxy response headers." +msgstr "" +"プロキシーのレスポンスの HTTP ヘッダーの指定されたフィールドの情報を記録しま" +"す。例えば ``%<{Retry-After}psh>`` はプロキシーレスポンスヘッダーの ``Retry-" +"After:`` フィールドを記録します。" + +#: ../../admin/event-logging-formats.en.rst:58 +msgid "``{HTTP header field name}ssh``" +msgstr "``{HTTP header field name}ssh``" + +#: ../../admin/event-logging-formats.en.rst:56 +msgid "" +"Logs the information in the requested field of the server response HTTP " +"header. For example, ``%<{Age}ssh>`` logs the ``Age:`` field in server " +"response headers." +msgstr "" +"サーバーのレスポンスの HTTP ヘッダーの指定されたフィールドの情報を記録しま" +"す。例えば ``%<{Age}ssh>`` はサーバーレスポンスヘッダーの ``Age:`` フィール" +"ドを記録します。" + +#: ../../admin/event-logging-formats.en.rst:62 +#: ../../admin/event-logging-formats.en.rst:332 +#: ../../admin/event-logging-formats.en.rst:347 +#: ../../admin/event-logging-formats.en.rst:364 +#: ../../admin/event-logging-formats.en.rst:390 +msgid "``caun``" +msgstr "``caun``" + +#: ../../admin/event-logging-formats.en.rst:61 +msgid "" +"The client authenticated username; result of the RFC931/ident lookup of the " +"client username." +msgstr "" +"クライアントの認証されたユーザー名で、RFC931/ident lookup の結果のクライアン" +"トユーザー名です。" + +#: ../../admin/event-logging-formats.en.rst:67 +#: ../../admin/event-logging-formats.en.rst:405 +msgid "``cfsc``" +msgstr "``cfsc``" + +#: ../../admin/event-logging-formats.en.rst:65 +msgid "" +"The client finish status code; specifies whether the client request to " +"Traffic Server was successfully completed (``FIN``) or interrupted " +"(``INTR``)." 
+msgstr "" +"クライアントの終了ステータスコード、 Traffic Server へのクライアントリクエス" +"トが正しく完了した (``FIN``) もしくは中断された (``INTR``) のどちらかが記述" +"されます。" + +#: ../../admin/event-logging-formats.en.rst:70 +#: ../../admin/event-logging-formats.en.rst:327 +#: ../../admin/event-logging-formats.en.rst:346 +#: ../../admin/event-logging-formats.en.rst:363 +#: ../../admin/event-logging-formats.en.rst:389 +msgid "``chi``" +msgstr "``chi``" + +#: ../../admin/event-logging-formats.en.rst:70 +msgid "The IP address of the client's host machine." +msgstr "クライアントのホストマシンの IP アドレス。" + +#: ../../admin/event-logging-formats.en.rst:73 +msgid "``chih``" +msgstr "``chih``" + +#: ../../admin/event-logging-formats.en.rst:73 +msgid "The IP address of the client's host machine in hexadecimal." +msgstr "16 進数表記でのクライアントのホストマシンの IP アドレス。" + +#: ../../admin/event-logging-formats.en.rst:80 +#: ../../admin/event-logging-formats.en.rst:371 +#: ../../admin/event-logging-formats.en.rst:397 +msgid "``cqbl``" +msgstr "``cqbl``" + +#: ../../admin/event-logging-formats.en.rst:79 +msgid "" +"The client request transfer length; the body length in the client request " +"to Traffic Server (in bytes)." +msgstr "" +"クライアントリクエストの転送長、Traffic Server へのクライアントリクエストの" +"ボディの長さです。( byte 単位 )" + +#: ../../admin/event-logging-formats.en.rst:84 +#: ../../admin/event-logging-formats.en.rst:373 +#: ../../admin/event-logging-formats.en.rst:399 +msgid "``cqhl``" +msgstr "``cqhl``" + +#: ../../admin/event-logging-formats.en.rst:83 +msgid "" +"The client request header length; the header length in the client request " +"to Traffic Server." +msgstr "" +"クライアントリクエストヘッダーの長さ。Traffic Server へのクライアントリクエ" +"ストのヘッダーの長さです。" + +#: ../../admin/event-logging-formats.en.rst:88 +#: ../../admin/event-logging-formats.en.rst:330 +msgid "``cqhm``" +msgstr "``cqhm``" + +#: ../../admin/event-logging-formats.en.rst:87 +msgid "" +"The HTTP method in the client request to Traffic Server: ``GET``, ``POST``, " +"and so on (subset of ``cqtx``)." 
+msgstr "" +"Traffic Server へのクライアントリクエストの HTTP メソッド。``GET`` や " +"``POST`` やその他のものです。( ``cqtx`` のサブセット )" + +#: ../../admin/event-logging-formats.en.rst:91 +msgid "``cqhv``" +msgstr "``cqhv``" + +#: ../../admin/event-logging-formats.en.rst:91 +msgid "The client request HTTP version." +msgstr "クライアントリクエストの HTTP バージョン。" + +#: ../../admin/event-logging-formats.en.rst:96 +msgid "``cqtd``" +msgstr "``cqtd``" + +#: ../../admin/event-logging-formats.en.rst:94 +msgid "" +"The client request timestamp. Specifies the date of the client request in " +"the format yyyy-mm-dd, where yyyy is the 4-digit year, mm is the 2-digit " +"month, and dd is the 2-digit day." +msgstr "" +"クライアントリクエストのタイムスタンプ。クライアントリクエストの日付を yyyy-" +"mm-dd のフォーマットで記述します。。yyyy は 4 桁で年、mm は 2 桁で月、dd は " +"2 桁で日を表します。" + +#: ../../admin/event-logging-formats.en.rst:100 +msgid "``cqtn``" +msgstr "``cqtn``" + +#: ../../admin/event-logging-formats.en.rst:99 +msgid "" +"The client request timestamp; date and time of the client's request (in the " +"Netscape timestamp format)." +msgstr "" +"クライアントリクエストのタイムスタンプ。クライアントのリクエストの" +"( Netscape タイムスタンプフォーマットでの) 日付と時間。" + +#: ../../admin/event-logging-formats.en.rst:103 +msgid "``cqtq``" +msgstr "``cqtq``" + +#: ../../admin/event-logging-formats.en.rst:103 +msgid "The client request timestamp, with millisecond resolution." +msgstr "ミリ秒精度のクライアントリクエストのタイムスタンプ。" + +#: ../../admin/event-logging-formats.en.rst:108 +#: ../../admin/event-logging-formats.en.rst:325 +msgid "``cqts``" +msgstr "``cqts``" + +#: ../../admin/event-logging-formats.en.rst:106 +msgid "" +"The client-request timestamp in Squid format; the time of the client " +"request since January 1, 1970 UTC. Time is expressed in seconds, with " +"millisecond resolution." 
+msgstr "" +"Squid フォーマットでのクライアントリクエストのタイムスタンプ。 1970 年 1 月 " +"1 日からのクライアントリクエストの時間。時間は秒単位で、ミリ秒の精度です。" + +#: ../../admin/event-logging-formats.en.rst:114 +msgid "``cqtt``" +msgstr "``cqtt``" + +#: ../../admin/event-logging-formats.en.rst:111 +msgid "" +"The client request timestamp. The time of the client request in the format " +"hh:mm:ss, where hh is the two-digit hour in 24-hour format, mm is the two-" +"digit minutes value, and ss is the 2-digit seconds value (for example, " +"16:01:19)." +msgstr "" +"クライアントリクエストのタイムスタンプ。hh:mm:ss フォーマット\n" +"でのクライアントリクエストの時間で、hh は 2 桁の 24 時間表記で、mm は 2 桁の" +"分の値、ss は 2 桁の秒の値です。(例 16:01:19)" + +#: ../../admin/event-logging-formats.en.rst:123 +msgid "``cqtx``" +msgstr "``cqtx``" + +#: ../../admin/event-logging-formats.en.rst:117 +msgid "The full HTTP client request text, minus headers; for example, ::" +msgstr "ヘッダーを除く HTTP クライアントリクエストテキストの全て。例えば" + +#: ../../admin/event-logging-formats.en.rst:121 +#: ../../admin/event-logging-formats.en.rst:129 +msgid "" +"In reverse proxy mode, Traffic Server logs the rewritten/mapped URL " +"(according to the rules in the :file:`remap.config` file), _not_ the " +"pristine/unmapped URL." +msgstr "" +"リバースプロキシーモードでは Traffic Server は pristine/unmapped URL ではな" +"く ( :file:`remap.config` ファイル内のルールに従って ) rewritten/mapped URL " +"を記録します。" + +#: ../../admin/event-logging-formats.en.rst:132 +msgid "``cqu``" +msgstr "``cqu``" + +#: ../../admin/event-logging-formats.en.rst:126 +msgid "" +"The universal resource identifier (URI) of the request from client to " +"Traffic Server (subset of ``cqtx`` )." +msgstr "" +"Traffic Server へのクライアントからのリクエストの universal resource " +"identifier (URI) (``cqtx`` のサブセット)" + +#: ../../admin/event-logging-formats.en.rst:141 +#: ../../admin/event-logging-formats.en.rst:331 +msgid "``cquc``" +msgstr "``cquc``" + +#: ../../admin/event-logging-formats.en.rst:135 +msgid "" +"The client request canonical URL. 
This differs from ``cqu`` in that blanks " +"(and other characters that might not be parsed by log analysis tools) are " +"replaced by escape sequences. The escape sequence is a percentage sign " +"followed by the ASCII code number in hex." +msgstr "" +"クライアントリクエストの正規化 URL。 ``cqu`` との違いは空白 ( 他のログ分析" +"ツールでパースされないかもしれない文字 ) がエスケープシーケンスに置換されて" +"いることです。" + +#: ../../admin/event-logging-formats.en.rst:141 +msgid "See `cquuc`_." +msgstr "`cquuc`_ を参照してください。" + +#: ../../admin/event-logging-formats.en.rst:149 +msgid "``cqup``" +msgstr "``cqup``" + +#: ../../admin/event-logging-formats.en.rst:144 +msgid "" +"The client request URL path; specifies the argument portion of the URL " +"(everything after the host). For example, if the URL is ``http://www." +"company.com/images/x.gif``, then this field displays ``/images/x.gif``" +msgstr "" +"クライアントリクエスト URL のパス。URL ( ホストの後ろの全て ) の項の部分を記" +"述します。例えば URL が ``http://www.company.com/images/x.gif`` であれば、こ" +"のフィールドは ``/images/x.gif`` と表示されます。" + +#: ../../admin/event-logging-formats.en.rst:149 +msgid "See `cquup`_." +msgstr "`cquup`_ を参照してください。" + +#: ../../admin/event-logging-formats.en.rst:152 +msgid "``cqus``" +msgstr "``cqus``" + +#: ../../admin/event-logging-formats.en.rst:152 +msgid "The client request URL scheme." +msgstr "クライアントリクエスト URL のスキーム。" + +#: ../../admin/event-logging-formats.en.rst:158 +msgid "``cquuc``" +msgstr "``cquuc``" + +#: ../../admin/event-logging-formats.en.rst:157 +msgid "" +"The client request unmapped URL canonical. This field records a URL before " +"it is remapped (reverse proxy mode)." +msgstr "" +"クライアントリクエストのマップされていない正規化 URL。このフィールドは リ" +"マップされる前の URL を記録します。( リバースプロキシーモード )" + +#: ../../admin/event-logging-formats.en.rst:164 +msgid "``cquup``" +msgstr "``cquup``" + +#: ../../admin/event-logging-formats.en.rst:163 +msgid "" +"The client request unmapped URL path. This field records a URL path before " +"it is remapped (reverse proxy mode)." 
+msgstr "" +"クライアントリクエストのマップされていない URL パス。このフィールドはリマッ" +"プされる前の URL パスを記録します。( リバースプロキシーモード )" + +#: ../../admin/event-logging-formats.en.rst:170 +msgid "``cquuh``" +msgstr "``cquuh``" + +#: ../../admin/event-logging-formats.en.rst:169 +msgid "" +"The client request unmapped URL host. This field records a URL's host " +"before it is remapped (reverse proxy mode)." +msgstr "" +"クライアントリクエストのマップされていない URL ホスト。このフィールドはリ" +"マップされる前の URL のホストを記録します。( リバースプロキシーモード )" + +#: ../../admin/event-logging-formats.en.rst:173 +msgid "``crat``" +msgstr "``crat``" + +#: ../../admin/event-logging-formats.en.rst:173 +msgid "The Retry-After time in seconds, if specified by the origin server." +msgstr "" +"オリジンサーバーによって指定されている場合、秒単位の Retry-After 時間。" + +#: ../../admin/event-logging-formats.en.rst:177 +#: ../../admin/event-logging-formats.en.rst:407 +msgid "``crc``" +msgstr "``crc``" + +#: ../../admin/event-logging-formats.en.rst:176 +msgid "" +"The cache result code; specifies how the cache responded to the request " +"(``HIT``, ``MISS``, and so on)." +msgstr "" +"キャッシュ結果コード。どんなキャッシュをリクエストへ返したかを記述します。" +"( ``HIT`` や ``MISS`` やその他 )" + +#: ../../admin/event-logging-formats.en.rst:181 +msgid "``csscl``" +msgstr "``csscl``" + +#: ../../admin/event-logging-formats.en.rst:180 +msgid "" +"The cached response length (in bytes) from origin server to Traffic Server." +msgstr "" +"キャッシュしたオリジンサーバーから Traffic Server へのレスポンスの長さ。" +"( byte 単位 )" + +#: ../../admin/event-logging-formats.en.rst:185 +msgid "``csshl``" +msgstr "``csshl``" + +#: ../../admin/event-logging-formats.en.rst:184 +msgid "" +"The cached header length in the origin server response to Traffic Server " +"(in bytes)." +msgstr "" +"キャッシュしたオリジンサーバーから Traffic Server へのヘッダーの長さ。" +"( byte 単位 )" + +#: ../../admin/event-logging-formats.en.rst:188 +msgid "``csshv``" +msgstr "``csshv``" + +#: ../../admin/event-logging-formats.en.rst:188 +msgid "The cached server response HTTP version (1.0, 1.1, etc.)." 
+msgstr "キャッシュしたサーバーレスポンス HTTP バージョン。( 1.0 や 1.1 等 )" + +#: ../../admin/event-logging-formats.en.rst:192 +msgid "``csssc``" +msgstr "``csssc``" + +#: ../../admin/event-logging-formats.en.rst:191 +msgid "" +"The cached HTTP response status code from origin server to Traffic Server." +msgstr "" +"キャッシュしたオリジンサーバーから Traffic Server への HTTP レスポンスステー" +"タスコード。" + +#: ../../admin/event-logging-formats.en.rst:195 +msgid "``cwr``" +msgstr "``cwr``" + +#: ../../admin/event-logging-formats.en.rst:195 +msgid "" +"The cache write result (``-``, ``WL_MISS``, ``INTR```, ``ERR`` or ``FIN``)" +msgstr "" +"キャッシュ書き込み結果。( ``-``、 ``WL_MISS``、 ``INTR```、 ``ERR`` または " +"``FIN`` )" + +#: ../../admin/event-logging-formats.en.rst:198 +msgid "``cwtr``" +msgstr "``cwtr``" + +#: ../../admin/event-logging-formats.en.rst:198 +msgid "The cache write transform result" +msgstr "キャッシュ書き込みトランスフォーム結果。" + +#: ../../admin/event-logging-formats.en.rst:201 +msgid "``fsiz``" +msgstr "``fsiz``" + +#: ../../admin/event-logging-formats.en.rst:201 +msgid "The size of the file (*n* bytes) as seen by the origin server." +msgstr "オリジンサーバーから見えるファイルサイズ。( *n* バイト )" + +#: ../../admin/event-logging-formats.en.rst:206 +#: ../../admin/event-logging-formats.en.rst:406 +msgid "``pfsc``" +msgstr "``pfsc``" + +#: ../../admin/event-logging-formats.en.rst:204 +msgid "" +"The proxy finish status code; specifies whether the Traffic Server request " +"to the origin server was successfully completed (``FIN``), interrupted " +"(``INTR``) or timed out (``TIMEOUT``)." +msgstr "" +"プロキシー終了ステータスコード。Traffic Server からオリジンサーバーへのリク" +"エストが正常に完了した (``FIN``) もしくは中断された (``INTR``) かタイムアウ" +"トした (``TIMEOUT``) かどうか。" + +#: ../../admin/event-logging-formats.en.rst:210 +msgid "``phn``" +msgstr "``phn``" + +#: ../../admin/event-logging-formats.en.rst:209 +msgid "" +"The hostname of the Traffic Server that generated the log entry in collated " +"log files." 
+msgstr "" +"ログファイルに収集されるログエントリーを生成した Traffic Server のホスト名。" + +#: ../../admin/event-logging-formats.en.rst:214 +msgid "``phi``" +msgstr "``phi``" + +#: ../../admin/event-logging-formats.en.rst:213 +msgid "" +"The IP of the Traffic Server that generated the log entry in collated log " +"files." +msgstr "" +"ログファイルに収集されるログエントリーを生成した Traffic Server の IP。" + +#: ../../admin/event-logging-formats.en.rst:218 +#: ../../admin/event-logging-formats.en.rst:404 +msgid "``phr``" +msgstr "``phr``" + +#: ../../admin/event-logging-formats.en.rst:217 +msgid "" +"The proxy hierarchy route; the route Traffic Server used to retrieve the " +"object." +msgstr "" +"プロキシー階層の経路。Traffic Server がオプジェクトを検索するために使用した" +"経路。" + +#: ../../admin/event-logging-formats.en.rst:222 +#: ../../admin/event-logging-formats.en.rst:372 +#: ../../admin/event-logging-formats.en.rst:398 +msgid "``pqbl``" +msgstr "``pqbl``" + +#: ../../admin/event-logging-formats.en.rst:221 +msgid "" +"The proxy request transfer length; the body length in Traffic Server's " +"request to the origin server." +msgstr "" +"プロキシーリクエスト転送長。Traffic Server からオリジンサーバーへのリクエス" +"トのボディの長さ。" + +#: ../../admin/event-logging-formats.en.rst:226 +#: ../../admin/event-logging-formats.en.rst:375 +#: ../../admin/event-logging-formats.en.rst:401 +msgid "``pqhl``" +msgstr "``pqhl``" + +#: ../../admin/event-logging-formats.en.rst:225 +msgid "" +"The proxy request header length; the header length in Traffic Server's " +"request to the origin server." +msgstr "" +"プロキシーリクエストヘッダーの長さ。Traffic Server からオリジンサーバーへの" +"リクエストのヘッダーの長さ。" + +#: ../../admin/event-logging-formats.en.rst:230 +msgid "``pqsi``" +msgstr "``pqsi``" + +#: ../../admin/event-logging-formats.en.rst:229 +msgid "" +"The proxy request server IP address (0 on cache hits and parent-ip for " +"requests to parent proxies)." 
+msgstr "" +"プロキシーリクエストサーバー IP アドレス。( キャッシュヒットした場合と親プロ" +"キシーへのリクエストのペアレント IP は 0 )" + +#: ../../admin/event-logging-formats.en.rst:234 +msgid "``pqsn``" +msgstr "``pqsn``" + +#: ../../admin/event-logging-formats.en.rst:233 +msgid "" +"The proxy request server name; the name of the server that fulfilled the " +"request." +msgstr "プロキシーリクエストサーバー名。リクエストを満たしたサーバーの名前。" + +#: ../../admin/event-logging-formats.en.rst:237 +#: ../../admin/event-logging-formats.en.rst:351 +#: ../../admin/event-logging-formats.en.rst:368 +#: ../../admin/event-logging-formats.en.rst:394 +msgid "``pscl``" +msgstr "``pscl``" + +#: ../../admin/event-logging-formats.en.rst:237 +msgid "The length of the Traffic Server response to the client (in bytes)." +msgstr "Traffic Server からクライアントへのレスポンスの長さ。( byte 単位 )" + +#: ../../admin/event-logging-formats.en.rst:241 +#: ../../admin/event-logging-formats.en.rst:334 +msgid "``psct``" +msgstr "``psct``" + +#: ../../admin/event-logging-formats.en.rst:240 +msgid "" +"The content type of the document from server response header: (for example, " +"``img/gif`` )." +msgstr "" +"サーバーレスポンスヘッダーのドキュメントのコンテンツタイプ。( 例えば ``img/" +"gif`` )" + +#: ../../admin/event-logging-formats.en.rst:244 +#: ../../admin/event-logging-formats.en.rst:374 +#: ../../admin/event-logging-formats.en.rst:400 +msgid "``pshl``" +msgstr "``pshl``" + +#: ../../admin/event-logging-formats.en.rst:244 +msgid "The header length in Traffic Server's response to the client." +msgstr "Traffic Server からクライアントへのレスポンスのヘッダーの長さ。" + +#: ../../admin/event-logging-formats.en.rst:248 +#: ../../admin/event-logging-formats.en.rst:329 +msgid "``psql``" +msgstr "``psql``" + +#: ../../admin/event-logging-formats.en.rst:247 +msgid "" +"The proxy response transfer length in Squid format (includes header and " +"content length)." 
+msgstr "" +"Squid フォーマットでのプロキシーレスポンス転送長。( ヘッダーとコンテンツの長" +"さを含む )" + +#: ../../admin/event-logging-formats.en.rst:251 +#: ../../admin/event-logging-formats.en.rst:350 +#: ../../admin/event-logging-formats.en.rst:367 +#: ../../admin/event-logging-formats.en.rst:393 +msgid "``pssc``" +msgstr "``pssc``" + +#: ../../admin/event-logging-formats.en.rst:251 +msgid "The HTTP response status code from Traffic Server to the client." +msgstr "Traffic Server からクライアントへの HTTP レスポンスステータスコード。" + +#: ../../admin/event-logging-formats.en.rst:262 +msgid "``shi``" +msgstr "``shi``" + +#: ../../admin/event-logging-formats.en.rst:254 +msgid "" +"The IP address resolved from the DNS name lookup of the host in the " +"request. For hosts with multiple IP addresses, this field records the IP " +"address resolved from that particular DNS lookup." +msgstr "" +"リクエスト内のホストの DNS 名からルックアップして解決された IP アドレス。複" +"数の IP アドレスを持つホストの場合、このフィールドは特定の DNS ルックアップ" +"により解決された IP アドレスを記録します。" + +#: ../../admin/event-logging-formats.en.rst:258 +msgid "" +"This can be misleading for cached documents. For example: if the first " +"request was a cache miss and came from **``IP1``** for server **``S``** and " +"the second request for server **``S``** resolved to **``IP2``** but came " +"from the cache, then the log entry for the second request will show " +"**``IP2``**." +msgstr "" +"これはキャッシュされたドキュメントで誤解を与えるかもしれません。例えば、最初" +"のリクエストがキャッシュミスでサーバー **``S``** へ **``IP1``** が来て、次の" +"サーバー **``S``** へのリクエストが **``IP2``** に解決され、キャッシュから来" +"た場合、2 番目のリクエストのログエントリーは **``IP2``** と記録します。" + +#: ../../admin/event-logging-formats.en.rst:265 +msgid "``shn``" +msgstr "``shn``" + +#: ../../admin/event-logging-formats.en.rst:265 +msgid "The hostname of the origin server." 
+msgstr "オリジンサーバーのホスト名。" + +#: ../../admin/event-logging-formats.en.rst:268 +#: ../../admin/event-logging-formats.en.rst:370 +#: ../../admin/event-logging-formats.en.rst:396 +msgid "``sscl``" +msgstr "``sscl``" + +#: ../../admin/event-logging-formats.en.rst:268 +msgid "The response length (in bytes) from origin server to Traffic Server." +msgstr "" +"オリジンサーバーから Traffic Server へのレスポンスの長さ。( byte 単位 )" + +#: ../../admin/event-logging-formats.en.rst:272 +#: ../../admin/event-logging-formats.en.rst:376 +#: ../../admin/event-logging-formats.en.rst:402 +msgid "``sshl``" +msgstr "``sshl``" + +#: ../../admin/event-logging-formats.en.rst:271 +msgid "" +"The header length in the origin server response to Traffic Server (in " +"bytes)." +msgstr "" +"オリジンサーバーが Traffic Server へ返すヘッダーの長さ。 ( byte 単位 )" + +#: ../../admin/event-logging-formats.en.rst:275 +msgid "``sshv``" +msgstr "``sshv``" + +#: ../../admin/event-logging-formats.en.rst:275 +msgid "The server response HTTP version (1.0, 1.1, etc.)." +msgstr "サーバーレスポンス HTTP バージョン。( 1.0、1.1 等 )" + +#: ../../admin/event-logging-formats.en.rst:278 +#: ../../admin/event-logging-formats.en.rst:369 +#: ../../admin/event-logging-formats.en.rst:395 +msgid "``sssc``" +msgstr "``sssc``" + +#: ../../admin/event-logging-formats.en.rst:278 +msgid "The HTTP response status code from origin server to Traffic Server." +msgstr "" +"オリジンサーバーから Traffic Server への HTTP レスポンスステータスコード。" + +#: ../../admin/event-logging-formats.en.rst:284 +#: ../../admin/event-logging-formats.en.rst:326 +msgid "``ttms``" +msgstr "``ttms``" + +#: ../../admin/event-logging-formats.en.rst:281 +msgid "" +"The time Traffic Server spends processing the client request; the number of " +"milliseconds between the time the client establishes the connection with " +"Traffic Server and the time Traffic Server sends the last byte of the " +"response back to the client." 
+msgstr "" +"Traffic Server がクライアントリクエストを処理するのに費やした時間。クライア" +"ントが Traffic Server とコネクションを確立した時間から Traffic Server がクラ" +"イアントにレスポンスブロックの最後のバイトを送り返した時間までのミリ秒の数。" + +#: ../../admin/event-logging-formats.en.rst:287 +msgid "``ttmsh``" +msgstr "``ttmsh``" + +#: ../../admin/event-logging-formats.en.rst:287 +msgid "Same as ``ttms`` but in hexadecimal." +msgstr "``ttms`` と同様だが 16 進法表記。" + +#: ../../admin/event-logging-formats.en.rst:298 +msgid "``ttmsf``" +msgstr "``ttmsf``" + +#: ../../admin/event-logging-formats.en.rst:290 +msgid "" +"The time Traffic Server spends processing the client request as a " +"fractional number of seconds. Time is specified in millisecond resolution; " +"however, instead of formatting the output as an integer (as with ``ttms``), " +"the display is formatted as a floating-point number representing a " +"fractional number of seconds." +msgstr "" +"少数秒での Traffic Server がクライアントリクエストを処理するのに費やした時" +"間。時間はミリ秒の精度で記述されます。しかし ( ``ttms`` のように ) 整数とし" +"てフォーマットして出力する代わりに、秒単位の少数精度の浮動小数点数でフォー" +"マットして表示します。" + +#: ../../admin/event-logging-formats.en.rst:296 +msgid "" +"For example: if the time is 1500 milliseconds, then this field displays 1.5 " +"while the ``ttms`` field displays 1500 and the ``tts`` field displays 1." +msgstr "" +"例: 時間が 1500 ミリ秒の場合、このフィールドは 1.5 と表示します。一方 " +"``ttms`` フィールドは 1500 、 ``tts`` は 1 と表示します。" + +#: ../../admin/event-logging-formats.en.rst:305 +#: ../../admin/event-logging-formats.en.rst:377 +#: ../../admin/event-logging-formats.en.rst:403 +msgid "``tts``" +msgstr "``tts``" + +#: ../../admin/event-logging-formats.en.rst:301 +msgid "" +"The time Traffic Server spends processing the client request; the number of " +"seconds between the time at which the client establishes the connection " +"with Traffic Server and the time at which Traffic Server sends the last " +"byte of the response back to the client." 
+msgstr "" +"Traffic Server がクライアントリクエストを処理するのに費やした時間。クライア" +"ントが Traffic Server とコネクションを確立した時間から、Traffic Server がク" +"ライアントにレスポンスの最後の byte を返した時間の間の秒数。" + +#: ../../admin/event-logging-formats.en.rst:310 +msgid "Logging Format Cross-Reference" +msgstr "ログフォーマットクロスリファレンス" + +#: ../../admin/event-logging-formats.en.rst:312 +msgid "" +"The following sections illustrate the correspondence between Traffic Server " +"logging fields and standard logging fields for the Squid and Netscape " +"formats." +msgstr "" +"次の章では Traffic Server のロギングフィールドと Squid と Netdcape フォー" +"マットの標準ロギングフィールドの対応を解説します。" + +#: ../../admin/event-logging-formats.en.rst:317 +msgid "Squid Logging Formats" +msgstr "Squid ロギングフォーマット" + +#: ../../admin/event-logging-formats.en.rst:319 +msgid "" +"The following is a list of the Squid logging fields and the corresponding " +"logging field symbols." +msgstr "" +"下記は Squid ロギングフィールドと対応するロギングフィールド記号の表です。" + +#: ../../admin/event-logging-formats.en.rst:323 +msgid "Squid" +msgstr "Squid" + +#: ../../admin/event-logging-formats.en.rst:323 +#: ../../admin/event-logging-formats.en.rst:344 +#: ../../admin/event-logging-formats.en.rst:361 +#: ../../admin/event-logging-formats.en.rst:387 +msgid "Field Symbols" +msgstr "フィールド記号" + +#: ../../admin/event-logging-formats.en.rst:325 +msgid "``time``" +msgstr "``time``" + +#: ../../admin/event-logging-formats.en.rst:326 +msgid "``elapsed``" +msgstr "``elapsed``" + +#: ../../admin/event-logging-formats.en.rst:327 +msgid "``client``" +msgstr "``client``" + +#: ../../admin/event-logging-formats.en.rst:328 +msgid "``action/code``" +msgstr "``action/code``" + +#: ../../admin/event-logging-formats.en.rst:328 +msgid "``crc/pssc``" +msgstr "``crc/pssc``" + +#: ../../admin/event-logging-formats.en.rst:329 +msgid "``size``" +msgstr "``size``" + +#: ../../admin/event-logging-formats.en.rst:330 +msgid "``method``" +msgstr "``method``" + +#: ../../admin/event-logging-formats.en.rst:331 +msgid "``url``" +msgstr "``url``" + +#: 
../../admin/event-logging-formats.en.rst:332 +msgid "``ident``" +msgstr "``ident``" + +#: ../../admin/event-logging-formats.en.rst:333 +msgid "``hierarchy/from``" +msgstr "``hierarchy/from``" + +#: ../../admin/event-logging-formats.en.rst:333 +msgid "``phr/pqsn``" +msgstr "``phr/pqsn``" + +#: ../../admin/event-logging-formats.en.rst:334 +msgid "``content``" +msgstr "``content``" + +#: ../../admin/event-logging-formats.en.rst:338 +msgid "Netscape Common Logging Formats" +msgstr "Netscape Common ロギングフォーマット" + +#: ../../admin/event-logging-formats.en.rst:340 +msgid "" +"The following is a list of the Netscape Common logging fields and the " +"corresponding Traffic Server logging field symbols." +msgstr "" +"下記は Netscape Common ロギングフィールドと対応する Traffic Server ロギング" +"フィールド記号の表です。" + +#: ../../admin/event-logging-formats.en.rst:344 +msgid "Netscape Common" +msgstr "Netscape Common" + +#: ../../admin/event-logging-formats.en.rst:346 +#: ../../admin/event-logging-formats.en.rst:363 +#: ../../admin/event-logging-formats.en.rst:389 +msgid "``host``" +msgstr "``host``" + +#: ../../admin/event-logging-formats.en.rst:347 +#: ../../admin/event-logging-formats.en.rst:364 +#: ../../admin/event-logging-formats.en.rst:390 +msgid "``usr``" +msgstr "``usr``" + +#: ../../admin/event-logging-formats.en.rst:348 +#: ../../admin/event-logging-formats.en.rst:365 +#: ../../admin/event-logging-formats.en.rst:391 +msgid "``[time]``" +msgstr "``[time]``" + +#: ../../admin/event-logging-formats.en.rst:348 +#: ../../admin/event-logging-formats.en.rst:365 +#: ../../admin/event-logging-formats.en.rst:391 +msgid "``[cqtn]``" +msgstr "``[cqtn]``" + +#: ../../admin/event-logging-formats.en.rst:349 +#: ../../admin/event-logging-formats.en.rst:366 +#: ../../admin/event-logging-formats.en.rst:392 +msgid "``\"req\"``" +msgstr "``\"req\"``" + +#: ../../admin/event-logging-formats.en.rst:349 +#: ../../admin/event-logging-formats.en.rst:366 +#: ../../admin/event-logging-formats.en.rst:392 +msgid 
"``\"cqtx\"``" +msgstr "``\"cqtx\"``" + +#: ../../admin/event-logging-formats.en.rst:350 +#: ../../admin/event-logging-formats.en.rst:367 +#: ../../admin/event-logging-formats.en.rst:393 +msgid "``s1``" +msgstr "``s1``" + +#: ../../admin/event-logging-formats.en.rst:351 +#: ../../admin/event-logging-formats.en.rst:368 +#: ../../admin/event-logging-formats.en.rst:394 +msgid "``c1``" +msgstr "``c1``" + +#: ../../admin/event-logging-formats.en.rst:355 +msgid "Netscape Extended Logging Formats" +msgstr "Netscape Extended ロギングフォーマット" + +#: ../../admin/event-logging-formats.en.rst:357 +msgid "" +"The following table lists the Netscape Extended logging fields and the " +"corresponding Traffic Server logging field symbols." +msgstr "" +"下記は Netscape Extended ロギングフォーマットフィールドと対応する Traffic " +"Server ロギングフィールド記号の表です。" + +#: ../../admin/event-logging-formats.en.rst:361 +msgid "Netscape Extended" +msgstr "Netscape Extended" + +#: ../../admin/event-logging-formats.en.rst:369 +#: ../../admin/event-logging-formats.en.rst:395 +msgid "``s2``" +msgstr "``s2``" + +#: ../../admin/event-logging-formats.en.rst:370 +#: ../../admin/event-logging-formats.en.rst:396 +msgid "``c2``" +msgstr "``c2``" + +#: ../../admin/event-logging-formats.en.rst:371 +#: ../../admin/event-logging-formats.en.rst:397 +msgid "``b1``" +msgstr "``b1``" + +#: ../../admin/event-logging-formats.en.rst:372 +#: ../../admin/event-logging-formats.en.rst:398 +msgid "``b2``" +msgstr "``b2``" + +#: ../../admin/event-logging-formats.en.rst:373 +#: ../../admin/event-logging-formats.en.rst:399 +msgid "``h1``" +msgstr "``h1``" + +#: ../../admin/event-logging-formats.en.rst:374 +#: ../../admin/event-logging-formats.en.rst:400 +msgid "``h2``" +msgstr "``h2``" + +#: ../../admin/event-logging-formats.en.rst:375 +#: ../../admin/event-logging-formats.en.rst:401 +msgid "``h3``" +msgstr "``h3``" + +#: ../../admin/event-logging-formats.en.rst:376 +#: ../../admin/event-logging-formats.en.rst:402 +msgid "``h4``" +msgstr "``h4``" + +#: 
../../admin/event-logging-formats.en.rst:377 +#: ../../admin/event-logging-formats.en.rst:403 +msgid "``xt``" +msgstr "``xt``" + +#: ../../admin/event-logging-formats.en.rst:381 +msgid "Netscape Extended-2 Logging Formats" +msgstr "Netscape Extended-2 ロギングフォーマット" + +#: ../../admin/event-logging-formats.en.rst:383 +msgid "" +"The following is a list of the Netscape Extended-2 logging fields and the " +"corresponding Traffic Server logging field symbols." +msgstr "" +"下記は Netscape Extended-2 ロギングフィールドと対応する Traffic Server ロギ" +"ングフィールド記号の表です。" + +#: ../../admin/event-logging-formats.en.rst:387 +msgid "Netscape Extended-2" +msgstr "Netscape Extended-2" + +#: ../../admin/event-logging-formats.en.rst:404 +msgid "``route``" +msgstr "``route``" + +#: ../../admin/event-logging-formats.en.rst:405 +msgid "``pfs``" +msgstr "``pfs``" + +#: ../../admin/event-logging-formats.en.rst:406 +msgid "``ss``" +msgstr "``ss``" + +#: ../../admin/event-logging-formats.en.rst:76 +msgid "``chp``" +msgstr "``chp``" + +#: ../../admin/event-logging-formats.en.rst:76 +msgid "The port number of the client's host machine." +msgstr "クライアントのホストマシンのポート番号。" diff --git a/doc/locale/ja/LC_MESSAGES/admin/explicit-proxy-caching.en.po b/doc/locale/ja/LC_MESSAGES/admin/explicit-proxy-caching.en.po new file mode 100644 index 00000000..5c02c632 --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/admin/explicit-proxy-caching.en.po @@ -0,0 +1,123 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-11-08 17:57+0900\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../admin/explicit-proxy-caching.en.rst:4 +msgid "Explicit Proxy Caching" +msgstr "" + +#: ../../admin/explicit-proxy-caching.en.rst:26 +msgid "" +"If you want to use Traffic Server as an explicit proxy cache, you must " +"configure client software (i.e., browsers) to send requests directly to " +"Traffic Server." +msgstr "" + +#: ../../admin/explicit-proxy-caching.en.rst:30 +msgid "" +"If you do not configure Traffic Server to use the transparency option (with " +"which client requests are intercepted en route to origin servers by a " +"switch/router and rerouted to the Traffic Server machine), then clients " +"must configure their web browsers to send HTTP requests to the Traffic " +"Server proxy cache by configuring their browsers to download proxy " +"configuration instructions from a :ref:`PAC file ` " +"(Proxy Auto-Configuration file)." 
+msgstr "" + +#: ../../admin/explicit-proxy-caching.en.rst:39 +msgid "Configuring Browsers Manually" +msgstr "" + +#: ../../admin/explicit-proxy-caching.en.rst:41 +msgid "" +"To manually configure a browser to send HTTP requests to Traffic Server, " +"clients must provide the following information:" +msgstr "" + +#: ../../admin/explicit-proxy-caching.en.rst:44 +msgid "The fully-qualified hostname or IP address of the Traffic Server node" +msgstr "" + +#: ../../admin/explicit-proxy-caching.en.rst:45 +msgid "The Traffic Server proxy server port (port 8080)" +msgstr "" + +#: ../../admin/explicit-proxy-caching.en.rst:47 +msgid "" +"In addition, clients can specify *not* to use Traffic Server for certain " +"sites - in such cases, requests to the listed sites go directly to the " +"origin server. The procedures for manual configuration vary among browser " +"versions; refer to specific browser documentation for complete proxy " +"configuration instructions. You do not need to set any special " +"configuration options on Traffic Server if you want to accept requests from " +"manually-configured browsers." +msgstr "" + +#: ../../admin/explicit-proxy-caching.en.rst:58 +msgid "Using a PAC File" +msgstr "" + +#: ../../admin/explicit-proxy-caching.en.rst:60 +msgid "" +"A **PAC file** is a specialized JavaScript function definition that a " +"browser calls to determine how requests are handled. Clients must specify " +"(in their browser settings) the URL from which the PAC file is loaded. You " +"can store a PAC file on Traffic Server (or on any server in your network) " +"and then provide the URL for this file to your clients." 
+msgstr "" + +#: ../../admin/explicit-proxy-caching.en.rst:66 +msgid "" +"If you want to store a PAC file on the Traffic Server system, then you must " +"perform the following configuration:" +msgstr "" + +#: ../../admin/explicit-proxy-caching.en.rst:69 +msgid "" +"Either copy an existing PAC file into the Traffic Server ``config`` " +"directory or enter a script that defines the proxy server configuration " +"settings in the ``proxy.pac`` file provided. The file is empty by default. " +"A sample script is provided in `Sample PAC File`_." +msgstr "" + +#: ../../admin/explicit-proxy-caching.en.rst:74 +msgid "" +"Specify the port Traffic Server uses to serve the PAC file. The default " +"port is 8083, see :ts:cv:`proxy.config.admin.autoconf_port`." +msgstr "" + +#: ../../admin/explicit-proxy-caching.en.rst:78 +msgid "Sample PAC File" +msgstr "" + +#: ../../admin/explicit-proxy-caching.en.rst:80 +msgid "" +"The following sample PAC file instructs browsers to connect directly to all " +"hosts without a fully-qualified domain name and to all hosts in the local " +"domain. All other requests go to the Traffic Server named ``myproxy.company." +"com``.::" +msgstr "" diff --git a/doc/locale/ja/LC_MESSAGES/admin/faqs.en.po b/doc/locale/ja/LC_MESSAGES/admin/faqs.en.po new file mode 100644 index 00000000..6e87455e --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/admin/faqs.en.po @@ -0,0 +1,760 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-11-08 17:57+0900\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../admin/faqs.en.rst:4 +msgid "FAQ and Troubleshooting Tips" +msgstr "FAQ と トラブルシューティングの秘訣" + +#: ../../admin/faqs.en.rst:27 +msgid "FAQs" +msgstr "FAQ" + +#: ../../admin/faqs.en.rst:30 +msgid "" +"How do you create a raw disk for the cache if all your disks have mounted " +"file systems?" +msgstr "" +"全ディスクがファイルシステムにマウントされている場合、キャッシュ用の raw " +"ディスクの作成はどうすればいいですか?" + +#: ../../admin/faqs.en.rst:37 +msgid "" +"How do disk I/O errors affect the cache and what does Traffic Server do " +"when a cache disk fails?" +msgstr "" +"ディスク I/O エラーはキャッシュにどのように影響し、キャッシュディスクが動か" +"なくなったとき Traffic Server は何を行いますか?" + +#: ../../admin/faqs.en.rst:39 +msgid "" +"If a disk drive fails five successive I/O operations, then Traffic Server " +"considers the drive inaccessible and removes the entire disk from the " +"cache. Normal cache operations continue for all other Traffic Server disk " +"drives." 
+msgstr "" +"ディスクドライブが I/O 操作に連続して五回失敗すると、Traffic Server はドライ" +"ブがアクセス不能とみなしキャッシュからディスク全体を外します。通常のキャッ" +"シュ操作は Traffic Server の他のすべてのディスクドライブで継続します。" + +#: ../../admin/faqs.en.rst:45 +msgid "" +"If a client disconnects during the time that Traffic Server is downloading " +"a large object, is any of the object saved in the cache?" +msgstr "" +"Traffic Server が大きいオブジェクトをダウンロード中にクライアントが接続を閉" +"じた場合、オブジェクトはキャッシュに保存されますか?" + +#: ../../admin/faqs.en.rst:47 +msgid "" +"When a client disconnects during an HTTP operation, Traffic Server " +"continues to download the object from the origin server for up to 10 " +"seconds. If the transfer from the origin server completes successfully " +"within 10 seconds after the client disconnect, then Traffic Server stores " +"the object in cache. If the origin server download does *not* complete " +"successfully within 10 seconds, then Traffic Server disconnects from the " +"origin server and deletes the object from cache. Traffic Server does not " +"store partial documents in the cache." +msgstr "" +"HTTP の処理中にクライアントが接続を閉じたとき、Traffic Server は 10 秒までオ" +"リジンサーバーからのオブジェクトのダウンロードを継続します。クライアントが接" +"続を閉じた後、オリジンサーバーからの転送が 10 秒以内に正常に完了した場合、" +"Traffic Server はオブジェクトをキャッシュに保存します。オリジンサーバーから" +"のダウンロードが 10 秒以内に正常に完了しない場合、Traffic Server はオリジン" +"サーバーとの接続を閉じ、キャッシュからオブジェクトを削除します。Traffic " +"Server は部分的なオブジェクトをキャッシュに保存しません。" + +#: ../../admin/faqs.en.rst:57 +msgid "" +"Can Traffic Server cache Java applets, JavaScript programs, or other " +"application files like VBScript?" +msgstr "" +"Traffic Server は Java アプレット、JavaScript のブログラムもしくは VBScript " +"の様なその他のアプリケーションファイルをキャッシュできますか?" + +#: ../../admin/faqs.en.rst:59 +msgid "" +"Yes, Traffic Server can store and serve Java applets, JavaScript programs, " +"VBScripts, and other executable objects from its cache according to the " +"freshness and cacheability rules for HTTP objects. 
Traffic Server does not " +"execute the applets, scripts, or programs, however - these objects run only " +"when the client system (ie, the one that sent the request) loads them." +msgstr "" +"はい、Traffic Server は Java アプレット、JavaScript プログラム、VBScript、そ" +"の他実行可能なオブジェクトを保存し、HTTP オブジェクトの新鮮さとキャッシュ" +"ルールにもとづいて配信することができます。Traffic Server はアプレット、スク" +"リプト、プログラムを実行せず、クライアントはこれらシステムに読み込んだときに" +"のみ実行されます。" + +#: ../../admin/faqs.en.rst:67 +msgid "" +"In Squid- and Netscape-format log files, what do the cache result codes " +"mean?" +msgstr "" +"Squid や Netscape フォーマットのログファイルで、キャッシュ結果コードは何を意" +"味していますか?" + +#: ../../admin/faqs.en.rst:69 +msgid "" +"This is described in detail in the :ref:`log-formats-squid-format` " +"documentation." +msgstr "" +"これについては :ref:`log-formats-squid-format` に詳細に説明されています。" + +#: ../../admin/faqs.en.rst:72 +msgid "What is recorded by the ``cqtx`` field in a custom log file?" +msgstr "" +"カスタムログファイル内の ``cqtx`` フィールドには何が記録されていますか?" + +#: ../../admin/faqs.en.rst:74 +msgid "" +"In **forward proxy mode**, the cqtx field records the complete client " +"request in the log file (for example, ``GET http://www.company.com " +"HTTP/1.0``)." +msgstr "" +"**フォワードプロキシーモード** では、cqtx フィールドはクライアントの完全なリ" +"クエストを記録しています (例えば、``GET http://www.company.com HTTP/1.0`` な" +"ど) 。" + +#: ../../admin/faqs.en.rst:76 +msgid "" +"In **reverse proxy mode**, the cqtx field records the hostname or IP " +"address of the origin server because Traffic Server first remaps the " +"request as per map rules in the :file:`remap.config` file." +msgstr "" +"**リバースプロキシーモード** では、Traffic Server はまずリクエストを :file:" +"`remap.config` ファイル内のマップルールごとにリマップを行うので、cqtx フィー" +"ルドはオリジンサーバーのホスト名もしくは IP アドレスを記録しています。" + +#: ../../admin/faqs.en.rst:81 +msgid "" +"Does Traffic Server refresh entries in its host database after a certain " +"period of time if they have not been used?" +msgstr "" +"Traffic Server はホストデータベース内のエントリーが一定期間使われなかった場" +"合にそれらを更新しますか?" 
+ +#: ../../admin/faqs.en.rst:83 +msgid "" +"By default, the Traffic Server host database observes the time-to-live " +"(``ttl``) values set by name servers. You can reconfigure Traffic Server to " +"ignore the ``ttl`` set by name servers and use a specific Traffic Server " +"setting instead. Alternatively, you can configure Traffic Server to compare " +"the ``ttl`` value set by the name server with the ``ttl`` value set by " +"Traffic Server, and then use either the lower or the higher value." +msgstr "" +"デフォルトでは、Traffic Server のホストデータベースはネームサーバーにより" +"セットされた time-to-live (``ttl``) を監視します。ネームサーバーによってセッ" +"トされた ``ttl`` を無視して指定した設定を使用するように Traffic Server を再" +"設定することも可能です。もしくは、ネームサーバーによってセットされた " +"``ttl`` の値と Traffic Server の設定値を比較し、低いものか高いものかどちらか" +"を使用するように Traffic Serverを設定することも可能です。" + +#: ../../admin/faqs.en.rst:91 +msgid "see :ts:cv:`proxy.config.hostdb.ttl_mode` for more info" +msgstr "詳しくは :ts:cv:`proxy.config.hostdb.ttl_mode` を観てください。" + +#: ../../admin/faqs.en.rst:94 +msgid "" +"Can you improve the look of your custom response pages by using images, " +"animated .gifs, and Java applets?" +msgstr "" +"カスタムレスポンスメッセージの見た目を画像やアニメーション GIF や Java アプ" +"レットを使用して改善することはできますか?" + +#: ../../admin/faqs.en.rst:96 +msgid "" +"No, because Traffic Server can only respond to clients with a single text " +"or HTML document. As a workaround, however, you can provide references on " +"your custom response pages to images, animated .gifs, Java applets, or " +"objects other than text which are located on a web server. Add links in the " +"body_factory template files in the same way you would for any image in an " +"HTML document (i.e., with the full URL in the ``SRC`` attribute)." 
+msgstr "" +"いいえ、Traffic Server はクライアントに一つのテキストもしくは HTML ドキュメ" +"ントしか返せません。次善策ではありますが、カスタマイズレスポンスページで " +"Web サーバー上にある画像、アニメーション GIF 、Java アプレット、その他テキス" +"トではないオブジェクトの参照を提供することは可能です。HTML ドキュメントに画" +"像を追加するのと同じ方法で body_factory テンプレートファイルにリンクを追加し" +"てください (例、\\ ``SRC`` 属性に完全な URL を指定するなど) 。" + +#: ../../admin/faqs.en.rst:105 +msgid "" +"Can Traffic Server run in forward proxy and reverse proxy modes at the same " +"time?" +msgstr "" +"Traffic Server はフォワードプロキシーモードとリバースプロキシーモードを同時" +"に実行できますか?" + +#: ../../admin/faqs.en.rst:107 +msgid "" +"Yes. When you enable reverse proxy mode, Traffic Server remaps incoming " +"requests according to the map rules in the :file:`remap.config` file. All " +"other requests that do not match a map rule are simply served in forward " +"proxy mode." +msgstr "" +"はい。リバースプロキシーモードを有効にした場合、Traffic Server は入ってきた" +"リクエストを :file:`remap.config` ファイル内のマップルールにもとづいて " +"remap します。マップルールにマッチしないその他のすべてのリクエストはフォワー" +"ドプロキシーモードで単純に処理されます。" + +#: ../../admin/faqs.en.rst:112 +msgid "" +"If you want to run in reverse proxy only mode (wherein Traffic Server does " +"*not* serve requests that fail to match a map rule), then you must set the " +"configuration variable :ts:cv:`proxy.config.url_remap.remap_required` to " +"``1`` in the :file:`records.config` file." +msgstr "" +"リバースプロキシーモードのみで実行したい場合 (Traffic Server はマップルール" +"にマッチしなかったリクエストに応え\\ *ません*\\ ) は、:file:`records." +"config` ファイル内の設定変数 :ts:cv:`proxy.config.url_remap.remap_required` " +"を ``1`` にセットしなければなりません。" + +#: ../../admin/faqs.en.rst:118 +msgid "How do I enable forward proxy mode" +msgstr "どうやってフォワードプロキシーモードを有効するのですか?" + +#: ../../admin/faqs.en.rst:120 +msgid "Please refer to the :ref:`forward-proxy` documentation." +msgstr ":ref:`forward-proxy` を参照してください。" + +#: ../../admin/faqs.en.rst:123 +msgid "How do I interpret the Via: header code?" +msgstr "どうやって Via: ヘッダーコードを解釈するのですか?" 
+ +#: ../../admin/faqs.en.rst:128 +msgid "Support for HTTP Expect: Header" +msgstr "HTTP Expect: ヘッダーのサポート" + +#: ../../admin/faqs.en.rst:130 +msgid "" +"Traffic Server currently does not handle request Expect: headers according " +"to the HTTP/1.1 spec." +msgstr "" +"Traffic Server は現在 HTTP/1.1 仕様による Expect: ヘッダーを扱いません。" + +#: ../../admin/faqs.en.rst:133 +msgid "" +"Note that clients such as cURL automatically send Expect: for POST requests " +"with large POST bodies, with a 1 second timeout if a 100 Continue response " +"is not received. To avoid the timeout when using cURL as a client to " +"Traffic Server, you can turn off the Expect: header as follows::" +msgstr "" +"cURL などのクライアントは大きい POST ボディを持つ POST リクエストで、1 秒の" +"タイムアウトで 100 Continue レスポンスを受け取らない場合に自動的に Expect: " +"ヘッダーを送信することに注意してください。cURL を Traffic Server のクライア" +"ントとして使用する際のタイムアウトを防ぐためには、次のように Expect: ヘッ" +"ダーをオフにすることができます。" + +#: ../../admin/faqs.en.rst:141 +msgid "C (libcurl)::" +msgstr "C (libcurl)::" + +#: ../../admin/faqs.en.rst:147 +msgid "php::" +msgstr "php::" + +#: ../../admin/faqs.en.rst:152 +msgid "Troubleshooting Tips" +msgstr "トラブルシューティングの秘訣" + +#: ../../admin/faqs.en.rst:155 +msgid "The throughput statistic is inaccurate" +msgstr "スループットの統計が不正確" + +#: ../../admin/faqs.en.rst:157 +msgid "" +"Traffic Server updates the throughput statistic after it has transferred an " +"entire object. For larger files, the byte count increases sharply at the " +"end of a transfer. The complete number of bytes transferred is attributed " +"to the last 10-second interval, although it can take several minutes to " +"transfer the object. This inaccuracy is more noticeable with a light load. " +"A heavier load yields a more accurate statistic." 
+msgstr "" +"Traffic Server はスループットの統計をオブジェクト全体を転送した後で更新しま" +"す。大きいファイルではバイト数が転送の終わりで鋭く上昇します。オブジェクトを" +"転送するのに数分かかることもありますが完全な転送バイト数は直近の 10 秒間とな" +"ります。この不正確さは負荷が軽い場合に顕著です。重い負荷はより正確な統計をも" +"たらします。" + +#: ../../admin/faqs.en.rst:165 +msgid "You are unable to execute Traffic Line commands" +msgstr "Traffic Line コマンドが実行できない" + +#: ../../admin/faqs.en.rst:167 +msgid "Traffic Line commands do not execute under the following conditions:" +msgstr "Traffic Line コマンドは次の状況で動作しません。" + +#: ../../admin/faqs.en.rst:169 +msgid "" +"**When the traffic_manager process is not running** Check to see if the :" +"program:`traffic_manager` process is running by entering the following " +"command: ``pgrep -l traffic_manager``" +msgstr "" +"**traffic_manager プロセスが実行されていない場合**\\ 次のコマンドを入力し" +"て :program:`traffic_manager` プロセスが実行中であることを確認してください。" + +#: ../../admin/faqs.en.rst:173 +msgid "" +"If the :program:`traffic_manager` process is not running, then enter the " +"following command from the Traffic Server ``bin`` directory to start it: " +"``./traffic_manager``" +msgstr "" +":program:`traffic_manager` プロセスが実行中ではない場合は、開始するために " +"Traffic Server の ``bin`` ディレクトリで次のコマンドを入力してください。``./" +"traffic_manager``" + +#: ../../admin/faqs.en.rst:179 +msgid "" +"You should always start and stop Traffic Server with the :program:" +"`trafficserver start`` and :program:`trafficserver stop` commands to ensure " +"that all the processes start and stop correctly. For more information, " +"refer to :ref:`getting-started`." +msgstr "" +"すべてのプロセスで正常に開始と終了が行われることを確実にするために、Traffic " +"Server は :program:`trafficserver start` と :program:`trafficserver stop` コ" +"マンドで開始と終了をすべきです。詳しくは :ref:`getting-started` を参照してく" +"ださい。" + +#: ../../admin/faqs.en.rst:184 +msgid "" +"**When you are not executing the command from $TSHome/bin** If the Traffic " +"Server ``bin`` directory is not in your path, then prepend the Traffic Line " +"commands with ``./`` (for example, ``./traffic_line -h``)." 
+msgstr "" +"**コマンドを $TSHome/bin で実行していない場合** Traffic Server の ``bin`` " +"ディレクトリがパスに含まれていない場合、Traffic Line コマンドの前に ``./`` " +"を追加してください (例えば、``./traffic_line -h``) 。" + +#: ../../admin/faqs.en.rst:188 +msgid "" +"**When multiple Traffic Server installations are present and you are not " +"executing the Traffic Line command from the active Traffic Server path " +"specified in ``/etc/trafficserver``**" +msgstr "" +"**複数のインストール済み Traffic Server が存在し、``/etc/trafficserver`` で" +"指定された Traffic Server のパスから Traffic Line コマンドが実行されていない" +"場合。**" + +#: ../../admin/faqs.en.rst:194 +msgid "" +"You observe inconsistent behavior when one node obtains an object from " +"another node in the cluster" +msgstr "" +"クラスター内の他のノードからあるノードがオブジェクトを取得する際に矛盾した振" +"る舞いを観測した" + +#: ../../admin/faqs.en.rst:196 +msgid "" +"As part of the initial system preparation, you must synchronize the clocks " +"on all nodes in your cluster. Minor time differences do not cause problems, " +"but differences of more than a few minutes can affect Traffic Server " +"operation." +msgstr "" +"最初のシステム準備の一環で、クラスター内のすべてのノードの時刻を同期しなけれ" +"ば成りません。多少の違いは問題となりませんが、数分以上のずれは Traffic " +"Server の動作に影響を与えます。" + +#: ../../admin/faqs.en.rst:201 +msgid "" +"You should run a clock synchronization daemon such as xntpd. 
To obtain the " +"latest version of xntpd, go to ``http://www.eecis.udel.edu/~ntp/``" +msgstr "" +"xntpd などの時刻同期デーモンを実行するべきです。xntpd の最新バージョンを取得" +"するには ``http://www.eecis.udel.edu/~ntp/`` にアクセスしてください。" + +#: ../../admin/faqs.en.rst:205 +msgid "Web browsers display an error document with a 'data missing' message" +msgstr "" +"ウェブブラウザーが 'data missing' というメッセージのエラードキュメントを表示" +"します" + +#: ../../admin/faqs.en.rst:207 +msgid "A message similar to the following might display in web browsers: ::" +msgstr "次のようなメッセージがウェブブラウザーに表示されるかもしれません。" + +#: ../../admin/faqs.en.rst:221 +msgid "Traffic Server does not resolve any websites" +msgstr "Traffic Server がどのウェブサイトも解決しません" + +#: ../../admin/faqs.en.rst:223 +msgid "" +"The browser indicates that it is contacting the host and then times out " +"with the following message: ::" +msgstr "" +"ブラウザーはホストへ接続しようとしてタイムアウトしたことを次のメッセージで示" +"しています。" + +#: ../../admin/faqs.en.rst:228 +msgid "" +"Make sure the system is configured correctly and that Traffic Server can " +"read the name resolution file:" +msgstr "" +"システムが正しく設定されていることと Traffic Server が名前解決ファイルを読み" +"込めるを確認してください。" + +#: ../../admin/faqs.en.rst:231 +msgid "" +"Check if the server can resolve DNS lookups by issuing the nslookup command " +"(for example, ``nslookup www.myhost.com``)." +msgstr "" +"nslookup コマンドでサーバーが DNS 問い合わせを解決できるか ( 例えば、" +"``nslookup www.myhost.com``) 確認してください。" + +#: ../../admin/faqs.en.rst:233 +msgid "" +"Check if the :manpage:`resolv.conf(5)` file contains valid IP addresses for " +"your DNS servers." +msgstr "" +":manpage:`resolv.conf(5)` ファイルが DNS サーバーの妥当な IP アドレスを含ん" +"でいるか確認してください。" + +#: ../../admin/faqs.en.rst:235 +msgid "" +"On some systems, if the :manpage:`resolv.conf(5)` file is unreadable or has " +"no name server entry, then the operating system uses ``localhost`` as a " +"name server. Traffic Server, however, does not use this convention. 
If you " +"want to use ``localhost`` as a name server, then you must add a name server " +"entry for ``127.0.0.1`` or ``0.0.0.0`` in the :manpage:`resolv.conf(5)` " +"file." +msgstr "" +"いくつかのシステムでは、:manpage:`resolv.conf(5)` ファイルが読み込めないか" +"ネームサーバーの項目がない場合、オペレーティングシステムは ``localhost`` を" +"ネームサーバーとして使用します。しかし Traffic Server はこの慣習を使用しませ" +"ん。``localhost`` をネームサーバーとして使用したい場合は、''127.0.0.1`` もし" +"くは ``0.0.0.0`` へのネームサーバーの項目を :manpage:`resolv.conf(5)` ファイ" +"ルに追加しなければ成りません。" + +#: ../../admin/faqs.en.rst:241 +msgid "" +"Check that the Traffic Server user account has permission to read the /etc/" +"resolv.conf file. If it does not, then change the file permissions to ``rw-" +"r--r--`` (``644``)" +msgstr "" +"Traffic Server のユーザーアカウントが /etc/resolv.conf ファイルを読み込む権" +"限を持っていることを確認してください。もし権限がない場合はファイルのパーミッ" +"ションを ``rw-r--r--`` (``644``) に変更してください。" + +#: ../../admin/faqs.en.rst:246 +msgid "'Maximum document size exceeded' message in the system log file" +msgstr "" +"システムログファイル内の 'Maximum document size exceeded' というメッセージ" + +#: ../../admin/faqs.en.rst:248 +msgid "The following message appears in the system log file: ::" +msgstr "次のメッセージがシステムログファイルに現れます。" + +#: ../../admin/faqs.en.rst:252 +msgid "" +"A requested object was larger than the maximum size allowed in the Traffic " +"Server cache, so Traffic Server provided proxy service for the oversized " +"object but did not cache it. To set the object size limit for the cache, " +"modify the :ts:cv:`proxy.config.cache.max_doc_size` variable in " +"the records.config file. If you do not want to limit the size of objects in " +"the cache, then set the document size to ``0`` (zero)." +msgstr "" +"リクエストされたオブジェクトが Traffic Server のキャッシュで許された最大サイ" +"ズより大きかったので、Traffic Server はその大きすぎるオブジェクトへのプロキ" +"シーは提供しましたがキャッシュは行いませんでした。キャッシュのオブジェクトサ" +"イズ制限を設定するには、records.config ファイルの :ts:cv:`proxy.config." 
+"cache.limits.http.max_doc_size` 設定変数を変更してください。キャッシュ内のオ" +"ブジェクトとのサイズを制限したくない場合は、ドキュメントサイズを ``0`` ( ゼ" +"ロ ) に設定してください。" + +#: ../../admin/faqs.en.rst:261 +msgid "'DrainIncomingChannel' message in the system log file" +msgstr "システムログファイル内の 'DrainIncomingChannel' というメッセージ" + +#: ../../admin/faqs.en.rst:263 +msgid "The following messages may appear in the system log file: ::" +msgstr "次のメッセージがシステムログファイルに現れるかもしれません。" + +#: ../../admin/faqs.en.rst:269 +msgid "" +"These error messages indicate that a browser is sending HTTP requests to " +"one of the Traffic Server cluster ports - either ``rsport`` (default port " +"8088) or ``mcport`` (default port 8089). Traffic Server discards the " +"request; this error does not cause any Traffic Server problems. The " +"misconfigured browser must be reconfigured to use the correct proxy port. " +"Traffic Server clusters work best when configured to use a separate network " +"interface and cluster on a private subnet, so that client machines have no " +"access to the cluster ports." 
+msgstr "" +"これらのエラーメッセージはブラウザーが HTTP リクエストを Traffic Server のク" +"ラスターポートに送信したことを示しています。これは ``rsport`` ( デフォルト" +"ポートは 8088) もしくは ``mcport`` ( デフォルトポートは 8089) です。Traffic " +"Server はこのリクエストを破棄します。このエラーは Traffic Server になんの問" +"題ももたらしません。間違って設定されたブラウザーは正しいプロキシーポートを使" +"用するために再設定されなければなりません。Traffic Server クラスターはクライ" +"アントマシンがクラスターポートにアクセスできないよう別のネットワークインタ" +"フェースを使用しプライベートサブネット上で使用するよう設定された場合によく動" +"作します。" + +#: ../../admin/faqs.en.rst:279 +msgid "'No cop file' message in the system log file" +msgstr "システムログファイル内の 'No cop file' というメッセージ" + +#: ../../admin/faqs.en.rst:281 +msgid "The following message appears repeatedly in the system log file: ::" +msgstr "次のメッセージがシステムログファイルに繰り返し現れます。" + +#: ../../admin/faqs.en.rst:285 +msgid "" +"The file ``var/trafficserver/no_cop`` acts as an administrative control " +"that instructs the :program:`traffic_cop` process to exit immediately " +"without starting :program:`traffic_manager` or performing any health " +"checks. The ``no_cop`` file prevents Traffic Server from starting " +"automatically when it has been stopped with the option:`trafficserver stop` " +"command. Without this static control, Traffic Server would restart " +"automatically upon system reboot. The ``no_cop`` control keeps Traffic " +"Server off until it is explicitly restarted with the ::" +msgstr "" +"``var/trafficserver/no_cop`` は :program:`traffic_cop` が :program:" +"`traffic_manager` を起動せずヘルスチェックを行わずに直ちに終了するように指示" +"する管理制御として振る舞います。``no_cop`` ファイルは Traffic Server が " +"option:`trafficserver stop` で停止されたときに自動的に開始するのを抑止しま" +"す。この静的な制御を行わないと、Traffic Server はシステムリブートにより自動" +"的に再開します。``no_cop`` 制御は" + +#: ../../admin/faqs.en.rst:296 +msgid "command." 
+msgstr "コマンドで明示的に再開されるまで Traffic Server を停止し続けます。" + +#: ../../admin/faqs.en.rst:300 +msgid "Warning in the system log file when manually editing vaddrs.config" +msgstr "vaddrs.config を手動で編集したときのシステムログファイルの警告" + +#: ../../admin/faqs.en.rst:302 +msgid "" +"If you manually edit the vaddrs.config file as a non-root user, then " +"Traffic Server issues a warning message in the system log file similar to " +"the following::" +msgstr "" +"vaddrs.config ファイルを非 root ユーザーとして手動で編集すると、Traffic " +"Server はシステムログファイルに次のような警告メッセージを発行します。" + +#: ../../admin/faqs.en.rst:308 +msgid "" +"You can safely ignore this message; Traffic Server *does* apply your " +"configuration edits." +msgstr "" +"このメッセージは安全に無視することができます。Traffic Server は設定ファイル" +"の編集を適用\\ *します*\\ 。" + +#: ../../admin/faqs.en.rst:312 +msgid "Traffic Server is running but no log files are created" +msgstr "Traffic Server は実行中だがログファイルが作られない" + +#: ../../admin/faqs.en.rst:314 +msgid "" +"Traffic Server only writes event log files when there is information to " +"record. If Traffic Server is idle, then it's possible/probable that no log " +"files exist. In addition:" +msgstr "" +"Traffic Server は記録する情報があるときにだけイベントログファイルに書き込み" +"ます。Traffic Server がアイドル状態の場合、ログファイルは存在しない可能性が" +"あります。" + +#: ../../admin/faqs.en.rst:318 +msgid "" +"Make sure you're looking in the correct directory. By default, Traffic " +"Server creates log files in the logs directory. Check the location of log " +"files by checking the value of the variable proxy.config.log.logfile_dir in " +"the records.config file. Check that the log directory has read/write " +"permissions for the Traffic Server user account. If the log directory does " +"not have the correct permissions, then the traffic_server process is unable " +"to open or create log files. Check that logging is enabled by checking the " +"value of the proxy.config.log.logging_enabled variable in the records." +"config file. Check that a log format is enabled. 
In the records.config " +"file, select the standard or custom format by editing variables in the " +"Logging Config section." +msgstr "" +"正しいディレクトリを見ていることを確認してください。デフォルトでは、Traffic " +"Server はログファイルをログディレクトリに作成します。records.config の " +"variable proxy.config.log.logfile_dir 変数の値を確認しログファイルの場所を確" +"認してください。ログディレクトリに Traffic Server ユーザーアカウントに対する" +"読み書きのパーミッションがあることを確認してください。ログディレクトリが正し" +"いパーミッションを持っていない場合、traffic_server プロセスはログファイルを" +"作成したり開いたりできません。records.config の proxy.config.log." +"logging_enabled 変数の値を確認し、ロギングが有効化されていることを確認してく" +"ださい。ログフォーマットが有効化されていることを確認してください。records." +"config ファイルで、ロギング設定セクションの変数を編集して標準もしくはカスタ" +"ムフォーマットを選択してください。" + +#: ../../admin/faqs.en.rst:332 +msgid "Traffic Server shows an error indicating too many network connections" +msgstr "Traffic Server がネットワーク接続が多すぎることを示すエラーを表示する" + +#: ../../admin/faqs.en.rst:334 +msgid "" +"By default, Traffic Server supports 8000 network connections: half of this " +"number is allocated for client connections and the remaining half is for " +"origin server connections. A **connection throttle event** occurs when " +"client or origin server connections reach 90% of half the configured limit " +"(3600 by default). When a connection throttle event occurs, Traffic Server " +"continues processing all existing connections but will not accept new " +"client connection requests until the connection count falls below the limit." 
+msgstr "" +"デフォルトでは、Traffic Server は 8000 のネットワーク接続に対応しています。" +"このうちの半分がクライアントとの接続に割り当てられ、残りの半分はオリジンサー" +"バーとの接続のためのものです。クライアントまたはオリジンサーバーの接続が設定" +"された制限の半分の 90% ( デフォルトでは 3600) に達すると **connection " +"throttle event** が発生します。connection throttle event が発生すると、" +"Traffic Server はすべての既存の接続の処理を継続しますが、接続数が制限を下回" +"るまで新規の接続要求を受け付けません。" + +#: ../../admin/faqs.en.rst:343 +msgid "Connection throttle events can occur under the following conditions:" +msgstr "Connection throttle event は次の条件で発生します。" + +#: ../../admin/faqs.en.rst:345 +msgid "" +"If there is a **connection spike** (e.g., if thousands of client requests " +"all reach Traffic Server at the same time). Such events are typically " +"transient and require no corrective action." +msgstr "" +"**接続のスパイク**\\ がある場合 ( 例えば、数千のクライアントのリクエストすべ" +"てが Traffic Server に同時に届いたなど ) 。このようなイベントは一般的に一時" +"的なもので修正の必要はありません。" + +#: ../../admin/faqs.en.rst:348 +msgid "" +"If there is a **service overload** (e.g., if client requests continuously " +"arrive faster than Traffic Server can service them). Service overloads " +"often indicate network problems between Traffic Server and origin servers. " +"Conversely, it may indicate that Traffic Server needs more memory, CPU, " +"cache disks, or other resources to handle the client load." +msgstr "" +"**サービス過負荷**\\ がある場合 ( 例えば、クライアントのリクエストが " +"Traffic Server が捌けるよりも早く継続的に届く場合) 。サービス過負荷は " +"Traffic Server とオリジンサーバーとの間のネットワークの問題を示していること" +"がよくあります。逆に言えば、Traffic Server はその負荷を扱うためにより多くの" +"メモリー、CPU 、キャッシュディスク、その他のリソースを必要としていることを示" +"しているかもしれません。" + +#: ../../admin/faqs.en.rst:355 +msgid "" +"If necessary, you can reset the maximum number of connections supported by " +"Traffic Server by editing the value of the :ref:`proxy.config.net." +"connections_throttle` configuration variable in the records.config file. Do " +"not increase the connection throttle limit unless the system has adequate " +"memory to handle the client connections required. 
A system with limited RAM " +"might need a throttle limit lower than the default value. Do not set this " +"variable below the minimum value of 100." +msgstr "" +"必要であれば、records.config ファイルの :ref:`proxy.config.net." +"connections_throttle` 設定変数を編集して Traffic Server がサポートする最大接" +"続数を再設定することができます。システムがクライアントの接続を扱うのに必要と" +"される十分なメモリーを持っていない限りは connection throttle 制限を増加しな" +"いでください。限られた RAM を持つシステムはデフォルト値よりも低い throttle " +"制限が必要かもしれません。この変数を最低値の 100 よりも低く設定しないでくだ" +"さい。" + +#: ../../admin/faqs.en.rst:365 +msgid "Low memory symptoms" +msgstr "メモリ不足の兆候" + +#: ../../admin/faqs.en.rst:367 +msgid "" +"Under heavy load, the Linux kernel can run out of RAM. This low memory " +"condition can cause slow performance and a variety of other system " +"problems. In fact, RAM exhaustion can occur even if the system has plenty " +"of free swap space." +msgstr "" +"高負荷な状態において、Linux カーネルは RAM を使い切る可能性があります。この" +"メモリの少ない状態はパフォーマンスの低下とその他様々なシステムの問題を引き起" +"こします。事実、RAM の枯渇 はシステムが豊富な空きスワップ領域を持っていたと" +"しても発生し得ます。" + +#: ../../admin/faqs.en.rst:372 +msgid "" +"Symptoms of extreme memory exhaustion include the following messages in the " +"system log files (``/var/log/messages``)::" +msgstr "" +"極限のメモリー消耗の兆候はシステムログファイル (``/var/log/messages``) の次" +"のメッセージを含みます。" + +#: ../../admin/faqs.en.rst:381 +msgid "" +"To avoid memory exhaustion, add more RAM to the system or reduce the load " +"on Traffic Server." +msgstr "" +"メモリーの消耗を防ぐために、システムにもっと RAM を追加するか Traffic " +"Server の負荷を減らしてください。" + +#: ../../admin/faqs.en.rst:385 +msgid "Connection timeouts with the origin server" +msgstr "オリジンサーバーとの接続タイムアウト" + +#: ../../admin/faqs.en.rst:387 +msgid "" +"Certain origin servers take longer than 30 seconds to post HTTP requests, " +"which results in connection timeouts with Traffic Server. To prevent such " +"connection timeouts, you must change the value of the configuration " +"variable proxy.config.http.connect_attempts_timeout in the records.config " +"file to 60 seconds or more." 
+msgstr "" +"いくらかのオリジンサーバーが HTTP リクエストを投稿するのに 30 秒以上を費や" +"し、これが Traffic Server で接続タイムアウトとなります。このような接続タイム" +"アウトを防ぐためには、records.config ファイルの proxy.config.http." +"connect_attempts_timeout 設定変数を 60 秒かそれ以上に変更しなければなりませ" +"ん。" + +#: ../../admin/faqs.en.rst:32 +msgid "" +"Create a large file on filesystem (with :manpage:`dd(1)`) and mount it as " +"loopback device. This is accomplished with :manpage:`losetup(8)` on Linux, :" +"manpage:`lofiadm(1m)` on Solaris and Illumos, and :manpage:`mdconfig(8)` on " +"FreeBSD." +msgstr "" +"ファイルシステム上に大きなファイルを ( :manpage:`dd(1)` を使って ) 作成し、" +"それをループバックデバイスとしてマウントしてください。これは Linux 上では :" +"manpage:`losetup(8)` で、Solaris と Illumos 上では :manpage:`lofiadm(1m)` 、" +"FreeBSD 上では :manpage:`mdconfig(8)` を使って行えます。" + +#: ../../admin/faqs.en.rst:125 +msgid "" +"The ``Via`` header string can be decoded with the `Via Decoder Ring `_." +msgstr "" +"``Via`` ヘッダー文字列は `Via Decoder Ring `_ でデコードできます。" + +#: ../../admin/faqs.en.rst:213 +msgid "" +"This is a Web browser issue and not a problem specific to (or caused by) " +"Traffic Server. Because Web browsers maintain a separate local cache in " +"memory and/or disk on the client system, messages about documents that have " +"expired from cache refer to the browser's local cache and *not* to the " +"Traffic Server cache. There is no Traffic Server message or condition that " +"can cause such messages to appear in a web browser." 
+msgstr "" +"これはウェブブラウザーの課題であり、Traffic Server に限ったもの ( もしく" +"は引き起こされるもの ) ではありません。ウェブブラウザーはクライアントシス" +"テム上にメモリと/またはディスクに別にローカルキャッシュを持っているので、" +"有効期限の切れたドキュメントに関するメッセージは Traffic Server のキャッ" +"シュでは\\ *なく*\\ ブラウザーのローカルキャッシュを参照します。" diff --git a/doc/locale/ja/LC_MESSAGES/admin/forward-proxy.en.po b/doc/locale/ja/LC_MESSAGES/admin/forward-proxy.en.po new file mode 100644 index 00000000..fa6bbf12 --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/admin/forward-proxy.en.po @@ -0,0 +1,154 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-11-08 17:57+0900\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../admin/forward-proxy.en.rst:4 +msgid "Forward Proxy" +msgstr "フォワードプロキシー" + +#: ../../admin/forward-proxy.en.rst:23 +msgid "" +"The Apache Traffic Server is a general purpose *proxy*. As such it can also " +"be used as forward proxy." 
+msgstr "" +"Apache Traffic Server は汎用の\\ *プロキシー*\\ です。つまりフォワードプロキ" +"シーとしても使用できます。" + +#: ../../admin/forward-proxy.en.rst:26 +msgid "" +"A forward proxy is can be used as a central tool in your infrastructure to " +"access the web. In combination with a cache that means overall reduced " +"bandwidth usage." +msgstr "" +"フォワードプロキシーは Web へアクセスするためのインフラの中心となるツールと" +"して使われます。キャッシュと組み合わせることで使用帯域を削減できます。" + +#: ../../admin/forward-proxy.en.rst:30 +msgid "" +"If your forward proxy is not also configured as :ref:`transparent-proxy` " +"your clients will have to be configured to actually use it." +msgstr "" +"フォワードプロキシーが :ref:`transparent-proxy` として設定されていない場合は" +"クライアントがそれを使うように設定する必要があるでしょう。" + +#: ../../admin/forward-proxy.en.rst:33 +msgid "" +"The main difference between a forward and a transparent proxy is that User " +"Agents *know* that they are accessing a proxy, thus forming their requests " +"like so: ::" +msgstr "" +"フォワードプロキシーと透過プロキシーの主な違いはユーザーエージェントがプロキ" +"シーにアクセスしていることを\\ *知っている*\\ ことで、次のようにリクエストを" +"行います。::" + +#: ../../admin/forward-proxy.en.rst:39 +msgid "This request, then is translated by the proxy to::" +msgstr "このリクエストはプロキシーによって次のように変換されます。::" + +#: ../../admin/forward-proxy.en.rst:44 +msgid "" +"Apache Traffic Server offers two ways to User Agents: They can either be " +"pointed directly to the default ``8080`` port. Alternatively, they can be " +"pointed to the more dynamic :ts:cv:`proxy.config.url_remap." +"default_to_server_pac`" +msgstr "" +"Apache Traffic Server はユーザーエージェントに二通りの方法を提供します。どち" +"らもデフォルトの ``8080`` ポートを直接指定できます。あるいは、より動的にも指" +"定できます。:ts:cv:`proxy.config.url_remap.default_to_server_pac`" + +#: ../../admin/forward-proxy.en.rst:48 +msgid "" +"This port will then serve a JavaScript like configuration that User Agents " +"can use to determine where to send their requests to." 
+msgstr "" +"このポートはユーザーエージェントがどこにリクエストを送るのかを決定するための" +"設定ファイルのような JavaScript を返します。" + +#: ../../admin/forward-proxy.en.rst:52 +msgid "Configuration" +msgstr "設定" + +#: ../../admin/forward-proxy.en.rst:57 +msgid ":ts:cv:`proxy.config.url_remap.remap_required` to ``0``" +msgstr ":ts:cv:`proxy.config.url_remap.remap_required` を ``0`` にする" + +#: ../../admin/forward-proxy.en.rst:59 +msgid "" +"If your proxy is serving as *pure* forward proxy, you will also want to set" +msgstr "" +"プロキシーが\\ *ピュアな*\\ フォワードプロキシーの場合、次の設定も必要となる" +"でしょう。" + +#: ../../admin/forward-proxy.en.rst:62 +msgid ":ts:cv:`proxy.config.reverse_proxy.enabled` to ``0``" +msgstr ":ts:cv:`proxy.config.reverse_proxy.enabled` を ``0`` にする" + +#: ../../admin/forward-proxy.en.rst:64 +msgid "Other configuration variables to consider:" +msgstr "その他に考慮すべき設定変数は次のとおりです。" + +#: ../../admin/forward-proxy.en.rst:66 +msgid ":ts:cv:`proxy.config.http.no_dns_just_forward_to_parent`" +msgstr ":ts:cv:`proxy.config.http.no_dns_just_forward_to_parent`" + +#: ../../admin/forward-proxy.en.rst:67 +msgid ":ts:cv:`proxy.config.http.forward.proxy_auth_to_parent`" +msgstr ":ts:cv:`proxy.config.http.forward.proxy_auth_to_parent`" + +#: ../../admin/forward-proxy.en.rst:68 +msgid ":ts:cv:`proxy.config.http.insert_squid_x_forwarded_for`" +msgstr ":ts:cv:`proxy.config.http.insert_squid_x_forwarded_for`" + +#: ../../admin/forward-proxy.en.rst:71 +msgid "Security Considerations" +msgstr "セキュリティの考慮" + +#: ../../admin/forward-proxy.en.rst:73 +msgid "" +"It's important to note that once your Apache Traffic Server is configured " +"as forward proxy it will indiscriminately accept proxy requests from " +"anyone. That means, if it's reachable on the internet, you have configured " +"an *Open Proxy*. Most of the time, this is *not* what you want, so you'll " +"have to make sure it's either only reachable within your NAT or is secured " +"by firewall rules that permit only those clients to access it which you " +"want to it to access." 
+msgstr "" +"Apache Traffic Server をフォワードプロキシーとして設定すると誰からのリクエス" +"トでも無差別に受け付けてしまうことに注意することが重要です。これは、もしイン" +"ターネットからアクセス可能な場合、\\ *オープンプロキシー*\\ を作ってしまった" +"ことを意味します。ほとんどの場合これは望んでいるものでは\\ *ない*\\ ので、" +"NAT からのみアクセス可能、もしくはファイアーウォールのルールによりアクセスさ" +"せたいクライアントだけにアクセスを許すよう保護されることを確かにしておく必要" +"があります。" + +#: ../../admin/forward-proxy.en.rst:54 +msgid "" +"In order to configure Apache Traffic Server as forward proxy you will have " +"to edit :file:`records.config` and set" +msgstr "" +"Apache Traffic Server をフォワードプロキシーとして設定するためには :file:" +"`records.config` を編集し次の設定を行う必要があります。" diff --git a/doc/locale/ja/LC_MESSAGES/admin/getting-started.en.po b/doc/locale/ja/LC_MESSAGES/admin/getting-started.en.po new file mode 100644 index 00000000..00edebde --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/admin/getting-started.en.po @@ -0,0 +1,348 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-11-08 17:57+0900\n" +"PO-Revision-Date: 2013-11-23 01:06+0900\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../admin/getting-started.en.rst:5 +msgid "Getting Started" +msgstr "はじめに" + +#: ../../admin/getting-started.en.rst:29 +msgid "Before you start" +msgstr "始める前に" + +#: ../../admin/getting-started.en.rst:31 +msgid "" +"Before you get started with Traffic Server you may have to decide which " +"version you want to use. Traffic Server follows the `Semantic Versioning " +"`_ guidelines, in summary" +msgstr "" +"Traffic Server を使い始める前に、どのバージョンを使うか決める必要がありま" +"す。Traffic Server は `Semantic Versioning`_ ガイドライン" +"に従っています。" + +#: ../../admin/getting-started.en.rst:35 +msgid "A version is made of a version-triplet: ``MAJOR.MINOR.PATCH``" +msgstr "" +"バージョンは次のような 3つ組みのバージョンで構成されています。 ``MAJOR." +"MINOR.PATCH``" + +#: ../../admin/getting-started.en.rst:37 +msgid "" +"As of v4.0.0, there are no longer any development (or unstable) releases. " +"All releases are considered stable and ready for production use, releases " +"within a major version are always upgrade compatible. More details are " +"available on the `Wiki page `_." +msgstr "" +"v4.0.0 から開発版(もしくは非安定版) リリースを行わなくなりました。全てのリ" +"リースは安定性を考慮され、プロダクションでの使用の準備ができています。メ" +"ジャーバージョンの同じリリースは常に後方互換性を持っています。詳しくは " +"`Wiki page `_ にあります。" + +#: ../../admin/getting-started.en.rst:43 +msgid "" +"Sometimes we speak of trunk, master or HEAD, all of which are used " +"interchangeably: trunk or master or sometimes TIP or HEAD, refer to the " +"latest code in a Git Version Control System. Master is always kept " +"releasable, and compatible with the current major release version. 
" +"Incompatible changes are sometimes committed on a next-major release " +"branch, for example we have the ``5.0.x`` branch where changes incompatible " +"with 4.x are managed." +msgstr "" +"トランクやマスターもしくはヘッドと言うこともありますが、これらは同じ意味で使" +"われています。トランクやマスターまた TIP や HEAD は Git Version Control " +"System での最新のコードを指します。マスターは常にリリース可能な状態に保たれ" +"ており、カレントのメジャーリリースバージョンと互換性があります。互換性のない" +"変更は next-major リリースブランチにコミットされることがあります。例えば 4." +"x と互換性のない変更が管理されている ``5.0.x`` というブランチがあります。" + +#: ../../admin/getting-started.en.rst:50 +msgid "" +"If your distribution does not come with a prepackaged Traffic Server, " +"please go to `downloads `_ to choose the version that you " +"consider most appropriate for yourself. If you want to really be on the " +"bleeding edge you can clone our `git repository `_." +msgstr "" +"お使いのディストリビューションが Traffic Server をあらかじめパッケージしてい" +"ない場合、 `downloads `_ へ行き、最も適切だと考えられるバージョ" +"ンを選んでください。最先端のものが本当に欲しい場合は `git repository " +"`_ をクローンして" +"ください。" + +#: ../../admin/getting-started.en.rst:56 +msgid "" +"Please note that while we do have a `GitHub Mirror `_ that you can also use to submit pull requests, it " +"may not be entirely up-to-date." 
+msgstr "" +"Pull-Request を送ることができる `GitHub Mirror `_ を持っていますが、これは最新版ではないかもしれないことに注" +"意してください。" + +#: ../../admin/getting-started.en.rst:61 +msgid "Building Traffic Server" +msgstr "Traffic Server のビルド" + +#: ../../admin/getting-started.en.rst:63 +msgid "" +"In order to build Traffic Server from source you will need the following " +"(development) packages:" +msgstr "" +"Traffic Server をソースコードからビルドするために、次の(開発)パッケージが必" +"要です。" + +#: ../../admin/getting-started.en.rst:66 +msgid "pkgconfig" +msgstr "pkgconfig" + +#: ../../admin/getting-started.en.rst:67 +msgid "libtool" +msgstr "libtool" + +#: ../../admin/getting-started.en.rst:68 +msgid "gcc (>= 4.3 or clang > 3.0)" +msgstr "gcc (>= 4.3 or clang > 3.0)" + +#: ../../admin/getting-started.en.rst:69 +msgid "make (GNU Make!)" +msgstr "make (GNU Make!)" + +#: ../../admin/getting-started.en.rst:70 +msgid "openssl" +msgstr "openssl" + +#: ../../admin/getting-started.en.rst:71 +msgid "tcl" +msgstr "tcl" + +#: ../../admin/getting-started.en.rst:72 +msgid "expat" +msgstr "expat" + +#: ../../admin/getting-started.en.rst:73 +msgid "pcre" +msgstr "pcre" + +#: ../../admin/getting-started.en.rst:74 +msgid "libcap" +msgstr "libcap" + +#: ../../admin/getting-started.en.rst:75 +msgid "flex (for TPROXY)" +msgstr "flex (for TPROXY)" + +#: ../../admin/getting-started.en.rst:76 +msgid "hwloc" +msgstr "hwloc" + +#: ../../admin/getting-started.en.rst:77 +msgid "lua" +msgstr "lua" + +#: ../../admin/getting-started.en.rst:78 +msgid "curses" +msgstr "curses" + +#: ../../admin/getting-started.en.rst:81 +msgid "if you're building from a git clone, you'll also need" +msgstr "git クローンからビルドする場合は、次のものも必要です。" + +#: ../../admin/getting-started.en.rst:83 +msgid "git" +msgstr "git" + +#: ../../admin/getting-started.en.rst:84 +msgid "autoconf" +msgstr "autoconf" + +#: ../../admin/getting-started.en.rst:85 +msgid "automake" +msgstr "automake" + +#: ../../admin/getting-started.en.rst:87 +msgid "We will show-case a build from git::" +msgstr "git 
からビルドする例をお見せしましょう。" + +#: ../../admin/getting-started.en.rst:91 +msgid "Next, we ``cd trafficserver`` and run::" +msgstr "次に、``cd trafficserver`` を実行し、次のコマンドを実行します。" + +#: ../../admin/getting-started.en.rst:95 +msgid "" +"This will generate a ``configure`` file from ``configure.ac``, so now we " +"can run that::" +msgstr "" +"これは ``configure`` ファイルを ``configure.ac`` から生成するので、次のコマ" +"ンドを実行できます。" + +#: ../../admin/getting-started.en.rst:100 +msgid "" +"Note well, that by default Traffic Server uses the user ``nobody``, as well " +"as user's primary group as Traffic Server user. If you want to change that, " +"you can override it here::" +msgstr "" +"デフォルトでは Traffic Server ユーザーとして ``nobody`` ユーザーを使用するこ" +"とに注意してください。プライマリーグループについても同様です。これを変更した" +"い場合、上書きすることができます。" + +#: ../../admin/getting-started.en.rst:106 +msgid "" +"If dependencies are not in standard paths (``/usr/local`` or ``/usr``), you " +"need to pass options to ``configure`` to account for that::" +msgstr "" +"標準的なパス ( ``/usr/local`` や ``/usr`` ) に依存関係がない場合、次のよう" +"に ``configure`` にオプションを通す必要があります。" + +#: ../../admin/getting-started.en.rst:111 +msgid "" +"Most ``configure`` path-options accept a format of ``\"INCLUDE_PATH:" +"LIBRARY_PATH\"``::" +msgstr "" +"ほとんどの ``configure`` パスオプションは ``\"INCLUDE_PATH:LIBRARY_PATH\"`` " +"というフォーマットを受け入れます。" + +#: ../../admin/getting-started.en.rst:117 +msgid "" +"We can run ``make`` to build the project. We highly recommend to run ``make " +"check`` to verify the build's general sanity::" +msgstr "" +"プロジェクトをビルドするために ``make`` コマンドを実行しましょう。ビルドの一" +"般的な正常さを確かめるために ``make check`` コマンドを実行することを強く推奨" +"します。" + +#: ../../admin/getting-started.en.rst:123 +msgid "" +"We can finally run ``make install`` to install (you may have to switch to " +"root to do this)::" +msgstr "" +"最後に ``make install`` コマンドを実行しましょう。(おそらく root になること" +"が必要でしょう)" + +#: ../../admin/getting-started.en.rst:128 +msgid "" +"We also recommend to run a regression test. 
Please note that this will only " +"work successfully with the default ``layout``::" +msgstr "" +"レグレッションテストを実行することも推奨します。これはデフォルト``レイアウト" +"``で正常に動作することに注意してください。" + +#: ../../admin/getting-started.en.rst:134 +msgid "" +"After you have installed Traffic Server on your system, you can do any of " +"the following:" +msgstr "" +"Traffic Server をシステム上にインストールした後、次のどれでもできます。" + +#: ../../admin/getting-started.en.rst:140 +msgid "Start Traffic Server" +msgstr "Traffic Server を起動する" + +#: ../../admin/getting-started.en.rst:142 +msgid "" +"To start Traffic Server manually, issue the ``trafficserver`` command, " +"passing in the attribute ``start``. This command starts all the processes " +"that work together to process Traffic Server requests as well as manage, " +"control, and monitor the health of the Traffic Server system." +msgstr "" +"Traffic Server を手動で起動するには ``trafficserver`` コマンドに ``start`` " +"を発行します。このコマンドは Traffic Server へのリクエストを処理したり、" +"Traffic Server システムの状態を管理、制御、監視するためのすべてのプロセスを" +"起動します。" + +#: ../../admin/getting-started.en.rst:147 +msgid "To run the ``trafficserver start`` command, e.g.::" +msgstr "``trafficserver start`` コマンドを実行するには次のようにします。" + +#: ../../admin/getting-started.en.rst:151 +msgid "" +"At this point your server is up and running in the default configuration of " +"a :ref:`reverse-proxy-and-http-redirects`." +msgstr "" +"この時点でサーバーは :ref:`../reverse-proxy-http-redirects` のデフォルト設定" +"で起動し、動作しています。" + +#: ../../admin/getting-started.en.rst:157 +msgid "Start Traffic Line" +msgstr "Traffic Line を使用する" + +#: ../../admin/getting-started.en.rst:159 +msgid "" +"Traffic Line provides a quick way of viewing Traffic Server statistics and " +"configuring the Traffic Server system via command-line interface. To " +"execute individual commands or script multiple commands, refer to :program:" +"`traffic_line`." 
+msgstr "" +"Traffic Line は Traffic Server の統計を見たり、コマンドラインインターフェー" +"スによる Traffic Server の設定をする簡単な方法を提供しています。独立したコマ" +"ンドの実行または複数のコマンドのスクリプトについては :program:" +"`traffic_line` を参照してください。" + +#: ../../admin/getting-started.en.rst:164 +msgid "Traffic Line commands take the following form::" +msgstr "Traffic Line コマンドは次のようなフォームを受け取ります。" + +#: ../../admin/getting-started.en.rst:168 +msgid "For a list of :program:`traffic_line` commands, enter::" +msgstr "" +":program:`traffic_line` のコマンドのリストを見るにはこのように実行してくださ" +"い。" + +#: ../../admin/getting-started.en.rst:172 +msgid "" +"Please note that :program:`traffic_line`, while a fine tool for an " +"administrator, is a poor choice for automation, especially that of " +"monitoring. See our chapter on :ref:`monitoring-traffic` for how to do that " +"better." +msgstr "" +"次のことに注意してください。 :program:`traffic_line` は管理者にとっては十分" +"なツールである一方で、自動化のためには貧弱な選択です。とくに監視については。" +"正しく監視する方法については :ref:`monitoring-traffic` の章を参照してくださ" +"い。" + +#: ../../admin/getting-started.en.rst:180 +msgid "Stop Traffic Server" +msgstr "Traffic Server を停止する" + +#: ../../admin/getting-started.en.rst:182 +msgid "" +"To stop Traffic Server, always use the :program:`trafficserver` command, " +"passing in the attribute ``stop``. This command stops all the Traffic " +"Server processes (:program:`traffic_manager`, :program:`traffic_server`, " +"and :program:`traffic_cop`). 
Do not manually stop processes, as this can " +"lead to unpredictable results.::" +msgstr "" +"Traffic Server を停止するには常に :program:`trafficserver` コマンドに " +"``stop`` を渡します。このコマンドは全ての Traffic Server プロセス ( :" +"program:`traffic_manager`, :program:`traffic_server`, :program:" +"`traffic_cop` ) を停止します。予想できない結果を招くので、手動で各プロセスを" +"止めないでください。" + +#: ../../admin/getting-started.en.rst:79 +msgid "curl (both for :program:`traffic_top`)" +msgstr "curl (both for :program:`traffic_top`)" diff --git a/doc/locale/ja/LC_MESSAGES/admin/hierachical-caching.en.po b/doc/locale/ja/LC_MESSAGES/admin/hierachical-caching.en.po new file mode 100644 index 00000000..86cdbf57 --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/admin/hierachical-caching.en.po @@ -0,0 +1,184 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-11-08 17:57+0900\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../admin/hierachical-caching.en.rst:4 +msgid "Hierarchical Caching" +msgstr "" + +#: ../../admin/hierachical-caching.en.rst:28 +msgid "Understanding Cache Hierarchies" +msgstr "" + +#: ../../admin/hierachical-caching.en.rst:30 +msgid "" +"A cache hierarchy consists of cache levels that communicate with each " +"other. Traffic Server supports several types of cache hierarchies. All " +"cache hierarchies recognize the concept of **parent** and **child**. A " +"parent cache is a cache higher up in the hierarchy, to which Traffic Server " +"can forward requests. A child cache is a cache for which Traffic Server is " +"a parent." +msgstr "" + +#: ../../admin/hierachical-caching.en.rst:37 +msgid "Traffic Server supports the following hierarchical caching options:" +msgstr "" + +#: ../../admin/hierachical-caching.en.rst:40 +msgid "Parent Caching" +msgstr "" + +#: ../../admin/hierachical-caching.en.rst:42 +msgid "" +"If a Traffic Server node cannot find a requested object in its cache, then " +"it searches a parent cache (which itself can search other caches) before " +"finally retrieving the object from the origin server. You can configure a " +"Traffic Server node to use one or more parent caches so that if one parent " +"is unavailable, then another parent is availale to service requests. This " +"is called `Parent Failover`_. Traffic Server will support parent caching " +"for HTTP and HTTPS requests." +msgstr "" + +#: ../../admin/hierachical-caching.en.rst:55 +msgid "" +"The figure below illustrates a simple cache hierarchy with a Traffic Server " +"node configured to use a parent cache. 
In the following scenario, a client " +"sends a request to a Traffic Server node that is a child in the cache " +"hierarchy (because it's configured to forward missed requests to a parent " +"cache). The request is a cache miss, so Traffic Server then forwards the " +"request to the parent cache, where it is a cache hit. The parent sends a " +"copy of the content to the Traffic Server, where it is cached and then " +"served to the client. Future requests for this content can now be served " +"directly from the Traffic Server cache (until the data is stale or expired)." +msgstr "" + +#: ../../admin/hierachical-caching.en.rst:70 +msgid "Parent caching" +msgstr "" + +#: ../../admin/hierachical-caching.en.rst:72 +msgid "" +"**Note:** If the request is a cache miss on the parent, then the parent " +"retrieves the content from the origin server (or from another cache, " +"depending on the parent’s configuration). The parent caches the content and " +"then sends a copy to Traffic Server (its child), where it is cached and " +"served to the client." +msgstr "" + +#: ../../admin/hierachical-caching.en.rst:79 +msgid "Parent Failover" +msgstr "" + +#: ../../admin/hierachical-caching.en.rst:81 +msgid "" +"Traffic Server supports use of several parent caches. This ensures that if " +"one parent cache is not available, another parent cache can service client " +"requests." +msgstr "" + +#: ../../admin/hierachical-caching.en.rst:85 +msgid "" +"When you configure your Traffic Server to use more than one parent cache, " +"Traffic Server detects when a parent is not available and sends missed " +"requests to another parent cache. If you specify more than two parent " +"caches, then the order in which the parent caches are queried depends upon " +"the parent proxy rules configured in the file:`parent.config` configuration " +"file. By default, the parent caches are queried in the order they are " +"listed in the configuration file." 
+msgstr "" + +#: ../../admin/hierachical-caching.en.rst:96 +msgid "Configuring Traffic Server to Use a Parent Cache" +msgstr "" + +#: ../../admin/hierachical-caching.en.rst:98 +msgid "" +"To configure Traffic Server to use one or more parent caches, you must " +"complete the following steps:" +msgstr "" + +#: ../../admin/hierachical-caching.en.rst:101 +msgid "Enable the parent caching option." +msgstr "" + +#: ../../admin/hierachical-caching.en.rst:102 +msgid "" +"Identify the parent cache you want to use to service missed requests. To " +"use **parent failover**, you must identify more than one parent cache so " +"that when a parent cache is unavailable, requests are sent to another " +"parent cache." +msgstr "" + +#: ../../admin/hierachical-caching.en.rst:107 +msgid "" +"You need to configure the child cache only. No additional configuration is " +"needed for the Traffic Server parent cache." +msgstr "" + +#: ../../admin/hierachical-caching.en.rst:109 +msgid "" +"Configure Traffic Server to use a parent cache by editing the following " +"variable :ts:cv:`proxy.config.http.parent_proxy_routing_enable` in :file:" +"`records.config` file." +msgstr "" + +#: ../../admin/hierachical-caching.en.rst:112 +msgid "" +"Edit the :file:`parent.config` file located in the Traffic Server " +"``config`` directory to set parent proxy rules to specify the parent cache " +"to which you want missed requests to be forwarded;" +msgstr "" + +#: ../../admin/hierachical-caching.en.rst:116 +msgid "" +"The following example configures Traffic Server to route all requests " +"containing the regular expression ``politics`` and the path ``/viewpoint`` " +"directly to the origin server (bypassing any parent hierarchies): ::" +msgstr "" + +#: ../../admin/hierachical-caching.en.rst:123 +msgid "" +"The following example configures Traffic Server to direct all missed " +"requests with URLs beginning with ``http://host1`` to the parent cache " +"``parent1``. 
If ``parent1`` cannot serve the requests, then requests are " +"forwarded to ``parent2``. Because ``round-robin=true``, Traffic Server goes " +"through the parent cache list in a round-robin based on client IP address.::" +msgstr "" + +#: ../../admin/hierachical-caching.en.rst:132 +msgid "" +"Run the command :option:`traffic_line -x` to apply the configuration " +"changes." +msgstr "" + +#: ../../admin/hierachical-caching.en.rst:50 +msgid "" +"**Note:** If you do not want all requests to go to the parent cache, then " +"simply configure Traffic Server to route certain requests (such as requests " +"containing specific URLs) directly to the origin server. Simply set parent " +"proxy rules in :file:`parent.config`" +msgstr "" diff --git a/doc/locale/ja/LC_MESSAGES/admin/http-proxy-caching.en.po b/doc/locale/ja/LC_MESSAGES/admin/http-proxy-caching.en.po new file mode 100644 index 00000000..43bd1dd0 --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/admin/http-proxy-caching.en.po @@ -0,0 +1,1885 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-11-08 17:57+0900\n" +"PO-Revision-Date: 2013-12-15 14:15+0900\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../admin/http-proxy-caching.en.rst:4 +msgid "HTTP Proxy Caching" +msgstr "HTTP プロキシーキャッシュ" + +#: ../../admin/http-proxy-caching.en.rst:23 +msgid "" +"Web proxy caching enables you to store copies of frequently-accessed web " +"objects (such as documents, images, and articles) and then serve this " +"information to users on demand. It improves performance and frees up " +"Internet bandwidth for other tasks." +msgstr "" +"ウェブプロキシーキャッシュは頻繁にアクセスされるウェブオブジェクト(ドキュメ" +"ントや画像、記事など)のコピーを保存し、ユーザーの求めに応じてこれらを配信す" +"ることを可能にします。これはパフォーマンスを向上させ、インターネットの帯域を" +"他のタスクのために空けます。" + +#: ../../admin/http-proxy-caching.en.rst:32 +msgid "Understanding HTTP Web Proxy Caching" +msgstr "HTTP ウェブプロキシーキャッシュの理解" + +#: ../../admin/http-proxy-caching.en.rst:34 +msgid "" +"Internet users direct their requests to web servers all over the Internet. " +"A caching server must act as a **web proxy server** so it can serve those " +"requests. After a web proxy server receives requests for web objects, it " +"either serves the requests or forwards them to the **origin server** (the " +"web server that contains the original copy of the requested information). " +"The Traffic Server proxy supports **explicit proxy caching**, in which the " +"user's client software must be configured to send requests directly to the " +"Traffic Server proxy. The following overview illustrates how Traffic Server " +"serves a request." 
+msgstr "" +"インターネットユーザはインターネット上のウェブサーバーへリクエストを出しま" +"す。キャッシュサーバーはこれらのリクエストを満たすために **ウェブプロキシ" +"サーバー** として振る舞わなくてはなりません。ウェブプロキシーサーバーがウェ" +"ブオブジェクトへのリクエストを受け取った後は、そのリクエストを返すか、**オリ" +"ジンサーバー** (リクエストされた情報のオリジナルコピーを持っているウェブサー" +"バー)へ転送します。Traffic Server プロキシーは **explicit proxy caching** を" +"サポートしています。この際、ユーザーのクライアントソフトが Traffic Server プ" +"ロキシーへ直接リクエストを送るように設定されている必要があります。次のオー" +"バービューは Traffic Server がどのようにリクエストを返すかを描いています。" + +#: ../../admin/http-proxy-caching.en.rst:44 +msgid "Traffic Server receives a client request for a web object." +msgstr "" +"Traffic Server がウェブオブジェクトへのクライアントリクエストを受け取りま" +"す。" + +#: ../../admin/http-proxy-caching.en.rst:46 +msgid "" +"Using the object address, Traffic Server tries to locate the requested " +"object in its object database (**cache**)." +msgstr "" +"オブジェクトのアドレスを用いて、Traffic Server はオブジェクトデータベース" +"( **キャッシュ** )からリクエストされたオブジェクトを探します。" + +#: ../../admin/http-proxy-caching.en.rst:49 +msgid "" +"If the object is in the cache, then Traffic Server checks to see if the " +"object is fresh enough to serve. If it is fresh, then Traffic Server serves " +"it to the client as a **cache hit** (see the figure below)." +msgstr "" +"キャッシュにオブジェクトがある場合、Traffic Server はオブジェクトが提供する" +"のに十分新しいか確認します。新しい場合、Traffic Server は *キャッシュヒット" +"* (下の図を見てください) としてクライアントにそれを提供します。" + +#: ../../admin/http-proxy-caching.en.rst:58 +msgid "A cache hit" +msgstr "キャッシュヒット" + +#: ../../admin/http-proxy-caching.en.rst:60 +msgid "" +"If the data in the cache is stale, then Traffic Server connects to the " +"origin server and checks if the object is still fresh (a :term:" +"`revalidation`). If it is, then Traffic Server immediately sends the cached " +"copy to the client." 
+msgstr "" +"キャッシュのデータが古い場合、Traffic Server はオリジンサーバーへ接続し、オ" +"ブジェクトが依然新しいかどうか確認します。( :term:`再確認` ) 新しい場合、" +"Traffic Server はすぐにキャッシュしているコピーをクライアントに送ります。" + +#: ../../admin/http-proxy-caching.en.rst:65 +msgid "" +"If the object is not in the cache (a **cache miss**) or if the server " +"indicates the cached copy is no longer valid, then Traffic Server obtains " +"the object from the origin server. The object is then simultaneously " +"streamed to the client and the Traffic Server local cache (see the figure " +"below). Subsequent requests for the object can be served faster because the " +"object is retrieved directly from cache." +msgstr "" +"オブジェクトがキャッシュに無い場合 ( **キャッシュミス**) やサーバーがキャッ" +"シュしたコピーをもはや有効ではないと判断した場合、 Traffic Server はオリジン" +"サーバーからオブジェクトを取得します。オブジェクトはクライアントと Traffic " +"Server のローカルキャッシュに同時に流されます。(下の図を見てください) 続いて" +"起こるオブジェクトへのリクエストはよりはやく提供することができます。それはオ" +"ブジェクトがキャッシュから直接検索されるからです。" + +#: ../../admin/http-proxy-caching.en.rst:76 +msgid "A cache miss" +msgstr "キャッシュミス" + +#: ../../admin/http-proxy-caching.en.rst:78 +msgid "" +"Caching is typically more complex than the preceding overview suggests. In " +"particular, the overview does not discuss how Traffic Server ensures " +"freshness, serves correct HTTP alternates, and treats requests for objects " +"that cannot/should not be cached. The following sections discuss these " +"issues in greater detail." +msgstr "" +"一般的にキャッシュは前述の概要で説明したものよりも複雑です。 詳しく述べる" +"と、概要では Traffic Server がどのように新鮮さを保証し、正しい HTTP オブジェ" +"クトの代替を提供し、キャッシュできない/するべきではないオブジェクトへのリク" +"エストを扱うかについて説明されていませんでした。次の章はこれらのことについて" +"とても詳しく説明します。" + +#: ../../admin/http-proxy-caching.en.rst:85 +msgid "Ensuring Cached Object Freshness" +msgstr "キャッシュされたオブジェクトの新鮮さの保証" + +#: ../../admin/http-proxy-caching.en.rst:87 +msgid "" +"When Traffic Server receives a request for a web object, it first tries to " +"locate the requested object in its cache. If the object is in cache, then " +"Traffic Server checks to see if the object is fresh enough to serve. 
For " +"HTTP objects, Traffic Server supports optional author-specified expiration " +"dates. Traffic Server adheres to these expiration dates; otherwise, it " +"picks an expiration date based on how frequently the object is changing and " +"on administrator-chosen freshness guidelines. Objects can also be " +"revalidated by checking with the origin server to see if an object is still " +"fresh." +msgstr "" +"Traffic Server がウェブオブジェクトへのリクエストを受け取った際、最初にリク" +"エストされたオブジェクトをキャッシュから探します。オブジェクトがキャッシュに" +"ある場合、Traffic Server はオブジェクトが提供するのに十分新しいかどうかを確" +"認します。 Traffic Server は HTTP オブジェクトに作成者が指定した有効期限をサ" +"ポートしています。 Traffic Server はこれらの有効期限を固く守ります。つまり、" +"どれだけ頻繁にオブジェクトが変更されるかと、管理者が選んだフレッシュネスガイ" +"ドラインに基づいて、有効期限を選択します。オブジェクトはまた、依然として新し" +"いかどうかをオリジンサーバーへ見に行くことにより、再検証されます。" + +#: ../../admin/http-proxy-caching.en.rst:98 +msgid "HTTP Object Freshness" +msgstr "HTTP オブジェクトの新鮮さ" + +#: ../../admin/http-proxy-caching.en.rst:100 +msgid "" +"Traffic Server determines whether an HTTP object in the cache is fresh by:" +msgstr "" +"Traffic Server はキャッシュした HTTP オブジェクトが新しいかどうかを次のこと" +"によって決定します。" + +#: ../../admin/http-proxy-caching.en.rst:103 +msgid "**Checking the** ``Expires`` **or** ``max-age`` **header**" +msgstr "``Expires`` **や** ``max-age`` **ヘッダーの確認**" + +#: ../../admin/http-proxy-caching.en.rst:105 +msgid "" +"Some HTTP objects contain ``Expires`` headers or ``max-age`` headers that " +"explicitly define how long the object can be cached. Traffic Server " +"compares the current time with the expiration time to determine if the " +"object is still fresh." 
+msgstr "" +"いくつかの HTTP オブジェクトは ``Expire`` ヘッダーや ``max-age`` ヘッダーを" +"含んでいます。これらはオブジェクトがどれくらいの期間キャッシュできるかどうか" +"を明確に定義しています。 Traffic Server はオブジェクトが新しいかどうかを決定" +"するために、現在時刻と有効期限を比較します。" + +#: ../../admin/http-proxy-caching.en.rst:110 +msgid "**Checking the** ``Last-Modified`` **/** ``Date`` **header**" +msgstr "``Last-Modified`` **/** ``Date`` **ヘッダーの確認**" + +#: ../../admin/http-proxy-caching.en.rst:112 +msgid "" +"If an HTTP object has no ``Expires`` header or ``max-age`` header, then " +"Traffic Server can calculate a freshness limit using the following formula::" +msgstr "" +"HTTP オブジェクトが ``Expire`` ヘッダーや ``max-age`` ヘッダーを持っていない" +"場合、Traffic Server はフレッシュネスリミットを次の式で計算します。" + +#: ../../admin/http-proxy-caching.en.rst:118 +msgid "" +"where *date* is the date in the object's server response header and " +"*last_modified* is the date in the ``Last-Modified`` header. If there is no " +"``Last-Modified`` header, then Traffic Server uses the date the object was " +"written to cache. The value ``0.10`` (10 percent) can be increased or " +"reduced to better suit your needs (refer to `Modifying Aging Factor for " +"Freshness Computations`_)." +msgstr "" +"この *date* はオブジェクトのサーバーのレスポンスヘッダーの日付で、" +"*last_modified* は ``Last-Modified`` ヘッダーの日付です。 ``Last-Modified`` " +"ヘッダーが無い場合、Traffic Server はオブジェクトがキャッシュに書かれた日時" +"を使用します。 ``0.10`` (10 %) という値は必要に応じて、増減することができま" +"す。 (詳しくは `新鮮さの計算のための期間要素の変更`_ を参照してください)" + +#: ../../admin/http-proxy-caching.en.rst:125 +msgid "" +"The computed freshness limit is bound by a minimum and maximum value - " +"refer to `Setting Absolute Freshness Limits`_ for more information." 
+msgstr "" +"計算されたフレッシュネスリミットは最小値と最大値に紐づけられます。 - 詳細は " +"`絶対フレッシュネスリミットの設定`_ を参照してください。" + +#: ../../admin/http-proxy-caching.en.rst:128 +msgid "**Checking the absolute freshness limit**" +msgstr "**絶対フレッシュネスリミットの確認**" + +#: ../../admin/http-proxy-caching.en.rst:130 +msgid "" +"For HTTP objects that do not have ``Expires`` headers or do not have both " +"``Last-Modified`` and ``Date`` headers, Traffic Server uses a maximum and " +"minimum freshness limit (refer to `Setting Absolute Freshness Limits`_)." +msgstr "" +"``Expires`` ヘッダーを持っていない、もしくは ``Last-Modified`` と ``Date`` " +"ヘッダーの両方をもっていない HTTP オブジェクトについて、Traffic Server はフ" +"レッシュネスリミットの最小値と最大値を使用します。( `絶対フレッシュネスリ" +"ミットの設定`_ を参照してください。)" + +#: ../../admin/http-proxy-caching.en.rst:134 +msgid "**Checking revalidate rules in the** :file:`cache.config` **file**" +msgstr ":file:`cache.config` **ファイル内の再確認ルールの確認**" + +#: ../../admin/http-proxy-caching.en.rst:136 +msgid "" +"Revalidate rules apply freshness limits to specific HTTP objects. You can " +"set freshness limits for objects originating from particular domains or IP " +"addresses, objects with URLs that contain specified regular expressions, " +"objects requested by particular clients, and so on (refer to :file:`cache." +"config`)." +msgstr "" +"再検証ルールは特定の HTTP オブジェクトにフレッシュネスリミットを適用します。" +"特定のドメインや IP アドレスから来たオブジェクト、特定の正規表現を含む URL " +"を持つオブジェクトや特定のクライアントからリクエストされたオブジェクトなどに" +"フレッシュネスリミットを設定することができます。( :file:`cache.config` を参" +"照してください。)" + +#: ../../admin/http-proxy-caching.en.rst:143 +msgid "Modifying Aging Factor for Freshness Computations" +msgstr "新鮮さの計算のための期間要素の変更" + +#: ../../admin/http-proxy-caching.en.rst:145 +msgid "" +"If an object does not contain any expiration information, then Traffic " +"Server can estimate its freshness from the ``Last-Modified`` and ``Date`` " +"headers. By default, Traffic Server stores an object for 10% of the time " +"that elapsed since it last changed. 
You can increase or reduce the " +"percentage according to your needs." +msgstr "" +"オブジェクトが有効期限に関する情報を持っていない場合、Traffic Server は " +"``Last-Modified`` と ``Date`` ヘッダーから新鮮さを見積もります。デフォルトで" +"は Traffic Server は最後に更新されてからの経過時間の 10 % キャッシュしま" +"す。 必要に応じて、増減することができます。" + +#: ../../admin/http-proxy-caching.en.rst:151 +msgid "To modify the aging factor for freshness computations" +msgstr "新鮮さの計算のための期間の要素を変更するためには、" + +#: ../../admin/http-proxy-caching.en.rst:153 +msgid "" +"Change the value for :ts:cv:`proxy.config.http.cache.heuristic_lm_factor`." +msgstr "" +":ts:cv:`proxy.config.http.cache.heuristic_lm_factor` の値を変更してくださ" +"い。" + +#: ../../admin/http-proxy-caching.en.rst:155 +#: ../../admin/http-proxy-caching.en.rst:173 +#: ../../admin/http-proxy-caching.en.rst:192 +#: ../../admin/http-proxy-caching.en.rst:281 +#: ../../admin/http-proxy-caching.en.rst:332 +msgid "" +"Run the :option:`traffic_line -x` command to apply the configuration " +"changes." +msgstr "" +"設定変更を適用するために :option:`traffic_line -x` コマンドを実行してくださ" +"い。" + +#: ../../admin/http-proxy-caching.en.rst:159 +msgid "Setting absolute Freshness Limits" +msgstr "絶対フレッシュネスリミットの設定" + +#: ../../admin/http-proxy-caching.en.rst:161 +msgid "" +"Some objects do not have ``Expires`` headers or do not have both ``Last-" +"Modified`` and ``Date`` headers. To control how long these objects are " +"considered fresh in the cache, specify an **absolute freshness limit**." 
+msgstr "" +"いくつかのオブジェクトは ``Expires`` ヘッダーを持っていない、もしくは " +"``Last-Modified`` と ``Date`` ヘッダーの両方を持っていないことがあります。こ" +"れらのオブジェクトがキャッシュされてどの程度フレッシュであると考えられるか制" +"御するために、 **絶対フレッシュネスリミット** があります。" + +#: ../../admin/http-proxy-caching.en.rst:166 +msgid "To specify an absolute freshness limit" +msgstr "絶対フレッシュネスリミットを明確にするために" + +#: ../../admin/http-proxy-caching.en.rst:168 +msgid "Edit the variables" +msgstr "次の変数を変更してください。" + +#: ../../admin/http-proxy-caching.en.rst:170 +msgid ":ts:cv:`proxy.config.http.cache.heuristic_min_lifetime`" +msgstr ":ts:cv:`proxy.config.http.cache.heuristic_min_lifetime`" + +#: ../../admin/http-proxy-caching.en.rst:171 +msgid ":ts:cv:`proxy.config.http.cache.heuristic_max_lifetime`" +msgstr ":ts:cv:`proxy.config.http.cache.heuristic_max_lifetime`" + +#: ../../admin/http-proxy-caching.en.rst:177 +msgid "Specifying Header Requirements" +msgstr "必須ヘッダーの記述" + +#: ../../admin/http-proxy-caching.en.rst:179 +msgid "" +"To further ensure freshness of the objects in the cache, configure Traffic " +"Server to cache only objects with specific headers. By default, Traffic " +"Server caches all objects (including objects with no headers); you should " +"change the default setting only for specialized proxy situations. If you " +"configure Traffic Server to cache only HTTP objects with ``Expires`` or " +"``max-age`` headers, then the cache hit rate will be noticeably reduced " +"(since very few objects will have explicit expiration information)." 
+msgstr "" +"よりいっそうキャッシュしているオブジェクトの新鮮さを確かめるために、特定の" +"ヘッダーを持っているオブジェクトだけをキャッシュするように Traffic Server を" +"設定することができます。デフォルトでは Traffic Server は(ヘッダーがないもの" +"も含む)全てのオブジェクトをキャッシュします。特別なプロキシーの状況の場合の" +"みデフォルト設定を変更するべきです。Traffic Server を ``Expires`` もしくは " +"``max-age`` ヘッダーを持つオブジェクトだけをキャッシュするように設定した場" +"合、キャッシュヒット率は明らかに下がるでしょう。(とても少ないオブジェクトし" +"か明確な有効期限の情報をもっていないと考えられるためです。)" + +#: ../../admin/http-proxy-caching.en.rst:188 +msgid "To configure Traffic Server to cache objects with specific headers" +msgstr "" +"特定のヘッダーを持つオブジェクトをキャッシュするように Traffic Server を設定" +"するには" + +#: ../../admin/http-proxy-caching.en.rst:190 +msgid "" +"Change the value for :ts:cv:`proxy.config.http.cache.required_headers`." +msgstr "" +":ts:cv:`proxy.config.http.cache.required_headers` の値を変更してください。" + +#: ../../admin/http-proxy-caching.en.rst:198 +msgid "Cache-Control Headers" +msgstr "Cache-Control ヘッダー" + +#: ../../admin/http-proxy-caching.en.rst:200 +msgid "" +"Even though an object might be fresh in the cache, clients or servers often " +"impose their own constraints that preclude retrieval of the object from the " +"cache. For example, a client might request that a object *not* be retrieved " +"from a cache, or if it does, then it cannot have been cached for more than " +"10 minutes. Traffic Server bases the servability of a cached object on " +"``Cache-Control`` headers that appear in both client requests and server " +"responses. 
The following ``Cache-Control`` headers affect whether objects " +"are served from cache:" +msgstr "" +"キャッシュしたあるオブジェクトが新鮮だと思われる場合であっても、クライアント" +"やサーバーはキャッシュからのオブジェクトの読み出しを妨害するようにたびたび制" +"限を課します。例えば、あるクライアントがキャッシュから復旧するべき *ではない" +"* オブジェクトへリクエストするかもしれません。また、それをした場合、10 分以" +"上はキャッシュすることはできません。 Traffic Server はキャッシュしたオブジェ" +"クトの提供可能性をクライアントのリクエストとサーバーのレスポンス両方に現れ" +"る ``Cache-Control`` ヘッダーを根拠に決定しています。次のような ``Cache-" +"Control`` ヘッダーはキャッシュからオブジェクトを提供するかどうかに影響しま" +"す。" + +#: ../../admin/http-proxy-caching.en.rst:209 +msgid "" +"The ``no-cache`` header, sent by clients, tells Traffic Server that it " +"should not serve any objects directly from the cache; therefore, Traffic " +"Server will always obtain the object from the origin server. You can " +"configure Traffic Server to ignore client ``no-cache`` headers - refer to " +"`Configuring Traffic Server to Ignore Client no-cache Headers`_ for more " +"information." +msgstr "" +"クライアントから送られる ``no-cache`` ヘッダーはどんなオブジェクトもキャッ" +"シュから直接返すべきではないということを Traffic Server に示します。従って、" +"Traffic Server は常にオリジンサーバーからオブジェクトを取得します。 Traffic " +"Server をクライアントからの ``no-cache`` ヘッダーを無視するように設定するこ" +"ともできます。詳細は `クライアントの no-cache ヘッダーを無視する Traffic " +"Server の設定`_ を参照してください。" + +#: ../../admin/http-proxy-caching.en.rst:216 +msgid "" +"The ``max-age`` header, sent by servers, is compared to the object age. If " +"the age is less than ``max-age``, then the object is fresh and can be " +"served." +msgstr "" +"サーバーから送られる ``max-age`` ヘッダーはオブジェクトのキャッシュされてい" +"る時間と比較されます。この時間が ``max-age`` よりも少ない場合、オブジェクト" +"はフレッシュであり配信されます。" + +#: ../../admin/http-proxy-caching.en.rst:220 +msgid "" +"The ``min-fresh`` header, sent by clients, is an **acceptable freshness " +"tolerance**. This means that the client wants the object to be at least " +"this fresh. Unless a cached object remains fresh at least this long in the " +"future, it is revalidated." 
+msgstr "" +"クライアントからの ``min-fresh`` ヘッダーは **受け入れることが許容できる新鮮" +"さ** です。これはクライアントが少なくとも指定された程度新鮮であることを望ん" +"でいるということを意味します。将来、キャッシュされたオブジェクトがこの長さの" +"新鮮さを失った場合、再検証されます。" + +#: ../../admin/http-proxy-caching.en.rst:225 +msgid "" +"The ``max-stale`` header, sent by clients, permits Traffic Server to serve " +"stale objects provided they are not too old. Some browsers might be willing " +"to take slightly stale objects in exchange for improved performance, " +"especially during periods of poor Internet availability." +msgstr "" +"クライアントからの ``max-stale`` ヘッダーは Traffic Server に古すぎない失効" +"したオブジェクトを配信することを許可します。いくつかのブラウザーは特に貧弱な" +"インターネット環境にあるような場合パフォーマンスを向上させるため、わずかに失" +"効したオブジェクトを受け取ることを望むかもしれません。" + +#: ../../admin/http-proxy-caching.en.rst:231 +msgid "" +"Traffic Server applies ``Cache-Control`` servability criteria ***after*** " +"HTTP freshness criteria. For example, an object might be considered fresh " +"but will not be served if its age is greater than its ``max-age``." +msgstr "" +"Traffic Server は ``Cache-Control`` を HTTP の新鮮さの基準の ***後に*** 配信" +"可能性の基準に適用します。例えば、あるオブジェクトが新鮮だと考えられる場合で" +"も、経過時間が ``max-age`` よりも大きければ、それは配信されません。" + +#: ../../admin/http-proxy-caching.en.rst:237 +msgid "Revalidating HTTP Objects" +msgstr "HTTP オブジェクトの再検証" + +#: ../../admin/http-proxy-caching.en.rst:239 +msgid "" +"When a client requests an HTTP object that is stale in the cache, Traffic " +"Server revalidates the object. A **revalidation** is a query to the origin " +"server to check if the object is unchanged. The result of a revalidation is " +"one of the following:" +msgstr "" +"クライアントがキャッシュの中で新鮮ではなくなった HTTP オブジェクトをリクエス" +"トした際、Traffic Server はそのオブジェクトを再検証します。**再検証** はオリ" +"ジンサーバーへオブジェクトが変更されているかどうかを確認する問い合わせです。" +"再検証の結果は次のいずれかです。" + +#: ../../admin/http-proxy-caching.en.rst:244 +msgid "" +"If the object is still fresh, then Traffic Server resets its freshness " +"limit and serves the object." 
+msgstr "" +"オブジェクトが依然として新鮮な場合、Traffic Server はフレッシュネスリミット" +"をリセットして、そのオブジェクトを配信します。" + +#: ../../admin/http-proxy-caching.en.rst:247 +msgid "" +"If a new copy of the object is available, then Traffic Server caches the " +"new object (thereby replacing the stale copy) and simultaneously serves the " +"object to the client." +msgstr "" +"オブジェクトの新しいコピーが有効な場合、Traffic Server は新しいオブジェクト" +"をキャッシュします。(従って、新鮮ではないコピーは置き換えられます)また、同時" +"にオブジェクトをクライアントに配信します。" + +#: ../../admin/http-proxy-caching.en.rst:251 +msgid "" +"If the object no longer exists on the origin server, then Traffic Server " +"does not serve the cached copy." +msgstr "" +"オブジェクトがオリジンサーバー上に存在しない場合、Traffic Server はキャッ" +"シュしたコピーを配信しません。" + +#: ../../admin/http-proxy-caching.en.rst:254 +msgid "" +"If the origin server does not respond to the revalidation query, then " +"Traffic Server serves the stale object along with a ``111 Revalidation " +"Failed`` warning." +msgstr "" +"オリジンサーバーが再検証の問い合わせに応答しない場合、Traffic Server は " +"``111 Revalidation Failed`` 警告と共に新鮮ではないオブジェクトを配信します。" + +#: ../../admin/http-proxy-caching.en.rst:258 +msgid "" +"By default, Traffic Server revalidates a requested HTTP object in the cache " +"if it considers the object to be stale. Traffic Server evaluates object " +"freshness as described in `HTTP Object Freshness`_. You can reconfigure how " +"Traffic Server evaluates freshness by selecting one of the following " +"options:" +msgstr "" +"デフォルトでは Traffic Server はリクエストされた HTTP オブジェクトが新鮮では" +"ないと考えられる場合に再検証します。Traffic Server のオブジェクトの新鮮さの" +"評価については `HTTP オブジェクトの新鮮さ`_ で述べられています。次のオプショ" +"ンの一つを選ぶことによって、 Traffic Server が新鮮さを評価する方法を再設定す" +"ることができます。" + +#: ../../admin/http-proxy-caching.en.rst:264 +msgid "" +"Traffic Server considers all HTTP objects in the cache to be stale: always " +"revalidate HTTP objects in the cache with the origin server." 
+msgstr "" +"Traffic Server はキャッシュしている全ての HTTP オブジェクトが新鮮ではないと" +"考えます。つまり、常にキャッシュの中の HTTP オブジェクトをオリジンサーバーへ" +"再検証します。" + +#: ../../admin/http-proxy-caching.en.rst:266 +msgid "" +"Traffic Server considers all HTTP objects in the cache to be fresh: never " +"revalidate HTTP objects in the cache with the origin server." +msgstr "" +"Traffic Server はキャッシュしている全ての HTTP オブジェクトを新鮮であると考" +"えます。つまり、オリジンサーバーへ HTTP オブジェクトを再検証することはありま" +"せん。" + +#: ../../admin/http-proxy-caching.en.rst:268 +msgid "" +"Traffic Server considers all HTTP objects without ``Expires`` or ``Cache-" +"control`` headers to be stale: revalidate all HTTP objects without " +"``Expires`` or ``Cache-Control`` headers." +msgstr "" +"Traffic Server は ``Expires`` や ``Cache-Control`` ヘッダーを持っていない " +"HTTP オブジェクトを新鮮ではないと考えます。つまり、常に ``Expires`` や " +"``Cache-Control`` ヘッダーのない HTTP オブジェクトを再検証します。" + +#: ../../admin/http-proxy-caching.en.rst:272 +msgid "" +"To configure how Traffic Server revalidates objects in the cache, you can " +"set specific revalidation rules in :file:`cache.config`." 
+msgstr "" +"Traffic Server がキャッシュしているオブジェクトを再検証する方法を設定するに" +"は :file:`cache.config` に特定の再検証のルールを設定してください。" + +#: ../../admin/http-proxy-caching.en.rst:275 +msgid "To configure revalidation options" +msgstr "再検証のオプションを設定するには" + +#: ../../admin/http-proxy-caching.en.rst:277 +#: ../../admin/http-proxy-caching.en.rst:490 +#: ../../admin/http-proxy-caching.en.rst:606 +msgid "Edit the following variable in :file:`records.config`" +msgstr ":file:`records.config` の次の変数を変更してください。" + +#: ../../admin/http-proxy-caching.en.rst:279 +msgid ":ts:cv:`proxy.config.http.cache.when_to_revalidate`" +msgstr ":ts:cv:`proxy.config.http.cache.when_to_revalidate`" + +#: ../../admin/http-proxy-caching.en.rst:285 +msgid "Scheduling Updates to Local Cache Content" +msgstr "ローカルキャッシュコンテンツの計画アップデート" + +#: ../../admin/http-proxy-caching.en.rst:287 +msgid "" +"To further increase performance and to ensure that HTTP objects are fresh " +"in the cache, you can use the **Scheduled Update** option. This configures " +"Traffic Server to load specific objects into the cache at scheduled times. " +"You might find this especially beneficial in a reverse proxy setup, where " +"you can *preload* content you anticipate will be in demand." +msgstr "" +"パフォーマンスをはるかに向上させるため、またキャッシュしている HTTP オブジェ" +"クトが新鮮であることを確実にするために、**計画アップデート** オプションを使" +"うことができます。これは特定のオブジェクトをスケジュールされた時間にキャッ" +"シュに読み込むように Traffic Server を設定します。リバースプロキシーをセット" +"アップしている際に、 負荷が心配されるコンテンツを *事前に読み込む* ことがで" +"きるという点で特に役に立つことに気づくかもしれません。" + +#: ../../admin/http-proxy-caching.en.rst:294 +msgid "" +"To use the Scheduled Update option, you must perform the following tasks." 
+msgstr "" +"計画的アップデートオプションを使うためには次のタスクを行う必要があります。" + +#: ../../admin/http-proxy-caching.en.rst:297 +msgid "" +"Specify the list of URLs that contain the objects you want to schedule for " +"update," +msgstr "スケジュール通りにアップデートしたいオブジェクトを含む URL のリスト" + +#: ../../admin/http-proxy-caching.en.rst:299 +msgid "the time the update should take place," +msgstr "アップデートが実行されるべき時間" + +#: ../../admin/http-proxy-caching.en.rst:300 +msgid "and the recursion depth for the URL." +msgstr "URL の再帰する深さを指定" + +#: ../../admin/http-proxy-caching.en.rst:301 +msgid "" +"Enable the scheduled update option and configure optional retry settings." +msgstr "計画アップデートオプションの有効化とオプショナルなリトライ設定の指定" + +#: ../../admin/http-proxy-caching.en.rst:304 +msgid "" +"Traffic Server uses the information you specify to determine URLs for which " +"it is responsible. For each URL, Traffic Server derives all recursive URLs " +"(if applicable) and then generates a unique URL list. Using this list, " +"Traffic Server initiates an HTTP ``GET`` for each unaccessed URL. It " +"ensures that it remains within the user-defined limits for HTTP concurrency " +"at any given time. The system logs the completion of all HTTP ``GET`` " +"operations so you can monitor the performance of this feature." +msgstr "" +"Traffic Server は責任を持つ URL を決定するために、指定された情報を使います。" +"各 URL に対して Traffic Server は (適用可能であれば) 全ての再帰的な URL を作" +"成し、ユニークな URL リストを生成します。このリストをもとに、Traffic Server " +"はまだアクセスされていない各 URL に対して HTTP ``GET`` リクエストを開始しま" +"す。このリクエストは常に ユーザーが定義した HTTP の並列度の範囲に収まること" +"が保証されています。システムは全ての HTTP ``GET`` オペレーションの完了を記録" +"します。よって、この機能のパフォーマンスを監視することができます。" + +#: ../../admin/http-proxy-caching.en.rst:313 +msgid "" +"Traffic Server also provides a **Force Immediate Update** option that " +"enables you to update URLs immediately without waiting for the specified " +"update time to occur. You can use this option to test your scheduled update " +"configuration (refer to `Forcing an Immediate Update`_)." 
+msgstr "" +"Traffic Server は **強制即時アップデート** オプションも提供します。これは " +"URL を指定されたアップデート時間になるまで待つことなく、すぐにアップデートす" +"ることを可能にします。このオプションを計画アップデートの設定をテストするため" +"に使うことができます。( `即時アップデートの強制`_ を参照してください)" + +#: ../../admin/http-proxy-caching.en.rst:319 +msgid "Configuring the Scheduled Update Option" +msgstr "計画アップデートオプションの設定" + +#: ../../admin/http-proxy-caching.en.rst:321 +msgid "To configure the scheduled update option" +msgstr "計画アップデートオプションを設定するためには" + +#: ../../admin/http-proxy-caching.en.rst:323 +msgid "" +"Edit :file:`update.config` to enter a line in the file for each URL you " +"want to update." +msgstr "" +":file:`update.config` にアップデートしたい URL を一行毎に書いてください" + +#: ../../admin/http-proxy-caching.en.rst:325 +#: ../../admin/http-proxy-caching.en.rst:346 +#: ../../admin/http-proxy-caching.en.rst:675 +msgid "Edit the following variables" +msgstr "次の変数を変更してください。" + +#: ../../admin/http-proxy-caching.en.rst:327 +msgid ":ts:cv:`proxy.config.update.enabled`" +msgstr ":ts:cv:`proxy.config.update.enabled`" + +#: ../../admin/http-proxy-caching.en.rst:328 +msgid ":ts:cv:`proxy.config.update.retry_count`" +msgstr ":ts:cv:`proxy.config.update.retry_count`" + +#: ../../admin/http-proxy-caching.en.rst:329 +msgid ":ts:cv:`proxy.config.update.retry_interval`" +msgstr ":ts:cv:`proxy.config.update.retry_interval`" + +#: ../../admin/http-proxy-caching.en.rst:330 +msgid ":ts:cv:`proxy.config.update.concurrent_updates`" +msgstr ":ts:cv:`proxy.config.update.concurrent_updates`" + +#: ../../admin/http-proxy-caching.en.rst:336 +msgid "Forcing an Immediate Update" +msgstr "即時アップデートの強制" + +#: ../../admin/http-proxy-caching.en.rst:338 +msgid "" +"Traffic Server provides a **Force Immediate Update** option that enables " +"you to immediately verify the URLs listed in :file:`update.config`. The " +"Force Immediate Update option disregards the offset hour and interval set " +"in :file:`update.config` and immediately updates the URLs listed." 
+msgstr "" +"Traffic Server は **強制即時アップデート** オプションを提供しています。これ" +"は :file:`update.config` ファイルにリストされた URL を即時に検証す ることを" +"可能にします。強制即時アップデートオプションは :file:`update.config` ファイ" +"ルに設定されたオフセット時間と間隔を無視し、リストされた URL を即時的にアッ" +"プデートします。" + +#: ../../admin/http-proxy-caching.en.rst:344 +msgid "To configure the Force Immediate Update option" +msgstr "強制即時アップデートオプション設定" + +#: ../../admin/http-proxy-caching.en.rst:348 +msgid ":ts:cv:`proxy.config.update.force`" +msgstr ":ts:cv:`proxy.config.update.force`" + +#: ../../admin/http-proxy-caching.en.rst:349 +msgid "Make sure :ts:cv:`proxy.config.update.enabled` is set to 1." +msgstr "" +":ts:cv:`proxy.config.update.enabled` に 1 が設定されていることを確認してくだ" +"さい。" + +#: ../../admin/http-proxy-caching.en.rst:351 +#: ../../admin/http-proxy-caching.en.rst:382 +#: ../../admin/http-proxy-caching.en.rst:436 +#: ../../admin/http-proxy-caching.en.rst:494 +#: ../../admin/http-proxy-caching.en.rst:533 +#: ../../admin/http-proxy-caching.en.rst:557 +#: ../../admin/http-proxy-caching.en.rst:590 +#: ../../admin/http-proxy-caching.en.rst:610 +#: ../../admin/http-proxy-caching.en.rst:635 +#: ../../admin/http-proxy-caching.en.rst:652 +#: ../../admin/http-proxy-caching.en.rst:682 +#: ../../admin/http-proxy-caching.en.rst:708 +#: ../../admin/http-proxy-caching.en.rst:735 +msgid "" +"Run the command :option:`traffic_line -x` to apply the configuration " +"changes." +msgstr "" +"設定変更を適用するために :option:`traffic_line -x` を実行してください。" + +#: ../../admin/http-proxy-caching.en.rst:356 +msgid "" +"When you enable the Force Immediate Update option, Traffic Server " +"continually updates the URLs specified in :file:`update.config` until you " +"disable the option. To disable the Force Immediate Update option, set :ts:" +"cv:`proxy.config.update.force` to ``0`` (zero)." 
+msgstr "" +"強制即時アップデートオプションを有効にした場合、Traffic Server はこのオプ" +"ションが無効化されるまで :file:`update.config` ファイルに指定された URL を" +"アップデートし続けます。強制即時アップデートオプションを無効化するために" +"は、 :ts:cv:`proxy.config.update.force` 変数を ``0`` (ゼロ) にしてください。" + +#: ../../admin/http-proxy-caching.en.rst:361 +msgid "Pushing Content into the Cache" +msgstr "コンテンツのキャッシュへのプッシュ" + +#: ../../admin/http-proxy-caching.en.rst:363 +msgid "" +"Traffic Server supports the HTTP ``PUSH`` method of content delivery. Using " +"HTTP ``PUSH``, you can deliver content directly into the cache without " +"client requests." +msgstr "" +"Traffic Server はコンテンツ配信に HTTP ``PUSH`` メソッドをサポートして いま" +"す。HTTP ``PUSH`` を使用すると、クライアントからのリクエスト無しに直接コンテ" +"ンツをキャッシュの中に入れることができます。" + +#: ../../admin/http-proxy-caching.en.rst:368 +msgid "Configuring Traffic Server for PUSH Requests" +msgstr "PUSH リクエスト用の Traffic Server の設定" + +#: ../../admin/http-proxy-caching.en.rst:370 +msgid "" +"Before you can deliver content into your cache using HTTP ``PUSH``, you " +"must configure Traffic Server to accept ``PUSH`` requests." +msgstr "" +"HTTP ``PUSH`` を使用してコンテンツをキャッシュの中に入れる前に、Traffic " +"Server が ``PUSH`` リクエストを受け入れるように設定する必要があります。" + +#: ../../admin/http-proxy-caching.en.rst:373 +msgid "To configure Traffic Server to accept ``PUSH`` requests" +msgstr "Traffic Server が ``PUSH`` リクエストを受け入れるように設定するには" + +#: ../../admin/http-proxy-caching.en.rst:375 +msgid "Edit :file:`ip_allow.config` to allow ``PUSH``." +msgstr ":file:`ip_allow.config` を変更して ``PUSH`` を許可してください。" + +#: ../../admin/http-proxy-caching.en.rst:377 +msgid "" +"Edit the following variable in :file:`records.config`, enable the " +"push_method." 
+msgstr "" +":file:`records.config` の次の変数を編集して、push_method を有効にしてくださ" +"い。" + +#: ../../admin/http-proxy-caching.en.rst:380 +msgid ":ts:cv:`proxy.config.http.push_method_enabled`" +msgstr ":ts:cv:`proxy.config.http.push_method_enabled`" + +#: ../../admin/http-proxy-caching.en.rst:386 +msgid "Understanding HTTP PUSH" +msgstr "HTTP PUSH の理解" + +#: ../../admin/http-proxy-caching.en.rst:388 +msgid "" +"``PUSH`` uses the HTTP 1.1 message format. The body of a ``PUSH`` request " +"contains the response header and response body that you want to place in " +"the cache. The following is an example of a ``PUSH`` request::" +msgstr "" +"``PUSH`` は HTTP 1.1 メッセージフォーマットを使用します。 ``PUSH`` リクエス" +"トのボディはキャッシュに入れたいレスポンスヘッダーとレスポンスボディを含みま" +"す。下記は ``PUSH`` リクエストの例です。" + +#: ../../admin/http-proxy-caching.en.rst:405 +msgid "" +"Your header must include ``Content-length`` - ``Content-length`` must " +"include both ``header`` and ``body byte count``." +msgstr "" +"ヘッダーは ``Content-length`` を含んでいる必要があります - ``Content-" +"length`` は ``header`` と ``body byte count`` の両方を含む必要があります。" + +#: ../../admin/http-proxy-caching.en.rst:409 +msgid "Tools that will help manage pushing" +msgstr "プッシュを手助けするツール" + +#: ../../admin/http-proxy-caching.en.rst:411 +msgid "" +"There is a perl script for pushing, :program:`tspush`, which can help you " +"understanding how to write scripts for pushing content yourself." +msgstr "" +"プッシュするための perl スクリプトがあります。:program:`tspush` です。これは" +"コンテンツをプッシュするためのスクリプトの書き方を理解することに役立ちます。" + +#: ../../admin/http-proxy-caching.en.rst:416 +msgid "Pinning Content in the Cache" +msgstr "コンテンツのキャッシュへのピン留め" + +#: ../../admin/http-proxy-caching.en.rst:418 +msgid "" +"The **Cache Pinning Option** configures Traffic Server to keep certain HTTP " +"objects in the cache for a specified time. You can use this option to " +"ensure that the most popular objects are in cache when needed and to " +"prevent Traffic Server from deleting important objects. 
Traffic Server " +"observes ``Cache-Control`` headers and pins an object in the cache only if " +"it is indeed cacheable." +msgstr "" +"**キャッシュのピン留めオプション** は特定の時間の間 HTTP オブジェクトを" +"キャッシュに確実に入れておくように Traffic Server を設定します。最もポピュ" +"ラーなオブジェクトが必要とされるときにキャッシュされていることと、 Traffic " +"Server が重要なオブジェクトを削除することを防ぐことを確実にしたい際にこのオ" +"プションが使えます。Traffic Server は ``Cache-Control`` ヘッダーを監視し、本" +"当にキャッシュ可能な場合にオブジェクトをキャッシュに留めます。" + +#: ../../admin/http-proxy-caching.en.rst:425 +msgid "To set cache pinning rules" +msgstr "キャッシュを留めるルールを設定するためには" + +#: ../../admin/http-proxy-caching.en.rst:427 +msgid "Make sure the following variable in :file:`records.config` is set" +msgstr "" +":file:`records.config` の次の変数がセットされていることを確認してください。" + +#: ../../admin/http-proxy-caching.en.rst:429 +msgid ":ts:cv:`proxy.config.cache.permit.pinning`" +msgstr ":ts:cv:`proxy.config.cache.permit.pinning`" + +#: ../../admin/http-proxy-caching.en.rst:431 +msgid "" +"Add a rule in :file:`cache.config` for each URL you want Traffic Server to " +"pin in the cache. For example::" +msgstr "" +"Traffic Server にキャッシュに留めさせたい URL 毎に :file:`cache.config` に" +"ルールを追加してください。例:" + +#: ../../admin/http-proxy-caching.en.rst:440 +msgid "To Cache or Not to Cache?" +msgstr "キャッシュするかキャッシュしないか?" + +#: ../../admin/http-proxy-caching.en.rst:442 +msgid "" +"When Traffic Server receives a request for a web object that is not in the " +"cache, it retrieves the object from the origin server and serves it to the " +"client. At the same time, Traffic Server checks if the object is cacheable " +"before storing it in its cache to serve future requests." 
+msgstr "" +"Traffic Server がキャッシュしていないウェブオブジェクトへのリクエストを受け" +"取った際、オリジンサーバーからオブジェクトを回収し、クライアントに配信しま" +"す。その際に、Traffic Server は将来のリクエストに備えてキャッシュに保存する" +"前に、オブジェクトがキャッシュ可能かどうか確認します。" + +#: ../../admin/http-proxy-caching.en.rst:448 +msgid "Caching HTTP Objects" +msgstr "HTTP オブジェクトのキャッシュ" + +#: ../../admin/http-proxy-caching.en.rst:450 +msgid "" +"Traffic Server responds to caching directives from clients and origin " +"servers, as well as directives you specify through configuration options " +"and files." +msgstr "" +"Traffic Server は設定オプションやファイルに指定したディレクティブと同じよう" +"に、クライアントやオリジンサーバーからのキャッシュのディレクティブに反応しま" +"す。" + +#: ../../admin/http-proxy-caching.en.rst:455 +msgid "Client Directives" +msgstr "クライアントディレクティブ" + +#: ../../admin/http-proxy-caching.en.rst:457 +msgid "" +"By default, Traffic Server does *not* cache objects with the following " +"**request headers**:" +msgstr "" +"デフォルトではTraffic Server は次の **リクエストヘッダー** を持つオブジェク" +"トをキャッシュ *しません* 。" + +#: ../../admin/http-proxy-caching.en.rst:460 +msgid "``Authorization``: header" +msgstr "``Authorization``: ヘッダー" + +#: ../../admin/http-proxy-caching.en.rst:462 +#: ../../admin/http-proxy-caching.en.rst:503 +msgid "``Cache-Control: no-store`` header" +msgstr "``Cache-Control: no-store`` ヘッダー" + +#: ../../admin/http-proxy-caching.en.rst:464 +msgid "``Cache-Control: no-cache`` header" +msgstr "``Cache-Control: no-cache`` ヘッダー" + +#: ../../admin/http-proxy-caching.en.rst:466 +msgid "" +"To configure Traffic Server to ignore the ``Cache-Control: no-cache`` " +"header, refer to `Configuring Traffic Server to Ignore Client no-cache " +"Headers`_" +msgstr "" +"``Cache-Control: no-cache`` ヘッダーを無視するように Traffic Server を設定す" +"るには `クライアントの no-cache ヘッダーを無視する Traffic Server の設定`_ " +"を参照してください。" + +#: ../../admin/http-proxy-caching.en.rst:469 +msgid "``Cookie``: header (for text objects)" +msgstr "``Cookie``: ヘッダー (テキストオブジェクト用)" + +#: ../../admin/http-proxy-caching.en.rst:471 +msgid "" +"By default, Traffic Server caches objects 
served in response to requests " +"that contain cookies (unless the object is text). You can configure Traffic " +"Server to not cache cookied content of any type, cache all cookied content, " +"or cache cookied content that is of image type only. For more information, " +"refer to `Caching Cookied Objects`_." +msgstr "" +"デフォルトでは、クッキーを含むリクエストへ返されるオブジェクト(テキストを除" +"くオブジェクト) を Traffic Server はキャッシュします。Traffic Server を次の" +"ように設定することができます。クッキーを持ったどんなタイプのコンテンツでも" +"キャッシュしない、クッキーを持った全てのコンテンツをキャッシュする、もしくは" +"クッキーを持った画像だけをキャッシュする。より詳しくは `クッキーオブジェクト" +"のキャッシュ`_ を参照してください。" + +#: ../../admin/http-proxy-caching.en.rst:478 +msgid "Configuring Traffic Server to Ignore Client no-cache Headers" +msgstr "クライアントの no-cache ヘッダーを無視する Traffic Server の設定" + +#: ../../admin/http-proxy-caching.en.rst:480 +msgid "" +"By default, Traffic Server strictly observes client ``Cache-Control: no-" +"cache`` directives. If a requested object contains a ``no-cache`` header, " +"then Traffic Server forwards the request to the origin server even if it " +"has a fresh copy in cache. You can configure Traffic Server to ignore " +"client ``no-cache`` directives such that it ignores ``no-cache`` headers " +"from client requests and serves the object from its cache." 
+msgstr "" +"デフォルトでは Traffic Server はクライアントの ``Cache-Control: no-cache`` " +"ディレクティブを正確に守ります。リクエストされたオブジェクトが ``no-cache`` " +"を含んでいる場合、Traffic Server はキャッシュのコピーが新鮮であったとして" +"も、オリジンサーバーにリクエストを転送します。Traffic Server がクライアント" +"からの ``no-cache`` ディレクティブを無視するように設定することもできます。こ" +"の場合、クライアントからのリクエストの ``no-cache`` ヘッダーを無視して、" +"キャッシュからオブジェクトを配信します。" + +#: ../../admin/http-proxy-caching.en.rst:488 +msgid "To configure Traffic Server to ignore client ``no-cache`` headers" +msgstr "``no-cache`` ヘッダーを無視するように Traffic Server を設定するには" + +#: ../../admin/http-proxy-caching.en.rst:492 +msgid ":ts:cv:`proxy.config.http.cache.ignore_client_no_cache`" +msgstr ":ts:cv:`proxy.config.http.cache.ignore_client_no_cache`" + +#: ../../admin/http-proxy-caching.en.rst:498 +msgid "Origin Server Directives" +msgstr "オリジンサーバーディレクティブ" + +#: ../../admin/http-proxy-caching.en.rst:500 +msgid "" +"By default, Traffic Server does *not* cache objects with the following " +"**response headers**:" +msgstr "" +"デフォルトでは Traffic Server は次の **レスポンスヘッダー** を持つようなオブ" +"ジェクトをキャッシュ *しません*。" + +#: ../../admin/http-proxy-caching.en.rst:504 +msgid "``Cache-Control: private`` header" +msgstr "``Cache-Control: private`` ヘッダー" + +#: ../../admin/http-proxy-caching.en.rst:505 +msgid "``WWW-Authenticate``: header" +msgstr "``WWW-Authenticate``: ヘッダー" + +#: ../../admin/http-proxy-caching.en.rst:507 +msgid "" +"To configure Traffic Server to ignore ``WWW-Authenticate`` headers, refer " +"to `Configuring Traffic Server to Ignore WWW-Authenticate Headers`_." 
+msgstr "" +"``WWW-Authenticate`` ヘッダーを無視するように Traffic Server を設定 するに" +"は `WWW-Authenticate ヘッダーを無視する Traffic Server の設定`_ を参照してく" +"ださい。" + +#: ../../admin/http-proxy-caching.en.rst:510 +msgid "``Set-Cookie``: header" +msgstr "``Set-Cookie``: ヘッダー" + +#: ../../admin/http-proxy-caching.en.rst:511 +msgid "``Cache-Control: no-cache`` headers" +msgstr "``Cache-Control: no-cache`` ヘッダー" + +#: ../../admin/http-proxy-caching.en.rst:513 +msgid "" +"To configure Traffic Server to ignore ``no-cache`` headers, refer to " +"`Configuring Traffic Server to Ignore Server no-cache Headers`_." +msgstr "" +"``no-cache`` ヘッダーを無視するように Traffic Server を設定するには `サー" +"バーの no-cache ヘッダーを無視する Traffic Server の設定`_ を参照してくださ" +"い。" + +#: ../../admin/http-proxy-caching.en.rst:516 +msgid "``Expires``: header with value of 0 (zero) or a past date" +msgstr "値が 0 (ゼロ)もしくは過去の日付の ``Expires`` ヘッダー" + +#: ../../admin/http-proxy-caching.en.rst:519 +msgid "Configuring Traffic Server to Ignore Server no-cache Headers" +msgstr "サーバーの no-cache ヘッダーを無視する Traffic Server の設定" + +#: ../../admin/http-proxy-caching.en.rst:521 +msgid "" +"By default, Traffic Server strictly observes ``Cache-Control: no-cache`` " +"directives. A response from an origin server with a ``no-cache`` header is " +"not stored in the cache and any previous copy of the object in the cache is " +"removed. If you configure Traffic Server to ignore ``no-cache`` headers, " +"then Traffic Server also ignores ``no-store`` headers. The default behavior " +"of observing ``no-cache`` directives is appropriate in most cases." 
+msgstr "" +"デフォルトでは Traffic Server は ``Cache-Control: no-cache`` ディレクティブ" +"を正確に守ります。``no-cache`` ヘッダーが付いているオリジンサーバーからのレ" +"スポンスはキャッシュに保存されません。また、以前キャッシュされたオブジェクト" +"のコピーは削除されます。 ``no-cache`` ヘッダーを無視するように Traffic " +"Server を設定した場合、Traffic Server は ``no-store`` ヘッダーも無視します。" +"``no-cache`` ディレクティブを守るデフォルトの振る舞いはほとんどの場合に適切" +"です。" + +#: ../../admin/http-proxy-caching.en.rst:529 +msgid "To configure Traffic Server to ignore server ``no-cache`` headers" +msgstr "" +"サーバーの ``no-cache`` ヘッダーを無視するように Traffic Server を設定するに" +"は" + +#: ../../admin/http-proxy-caching.en.rst:531 +msgid "" +"Edit the variable :ts:cv:`proxy.config.http.cache.ignore_server_no_cache`" +msgstr "" +":ts:cv:`proxy.config.http.cache.ignore_server_no_cache` 変数を変更してくださ" +"い。" + +#: ../../admin/http-proxy-caching.en.rst:537 +msgid "Configuring Traffic Server to Ignore WWW-Authenticate Headers" +msgstr "WWW-Authenticate ヘッダーを無視する Traffic Server の設定" + +#: ../../admin/http-proxy-caching.en.rst:539 +msgid "" +"By default, Traffic Server does not cache objects that contain ``WWW-" +"Authenticate`` response headers. The ``WWW-Authenticate`` header contains " +"authentication parameters the client uses when preparing the authentication " +"challenge response to an origin server." +msgstr "" +"デフォルトでは Traffic Server は ``WWW-Authenticate`` レスポンスヘッダーを含" +"むオブジェクトをキャッシュしません。 ``WWW-Authenticate`` ヘッダーはクライア" +"ントがオリジンサーバーへのチャレンジレスポンス認証の際に使う認証パラメーター" +"を含んでいます。" + +#: ../../admin/http-proxy-caching.en.rst:544 +msgid "" +"When you configure Traffic Server to ignore origin server ``WWW-" +"Authenticate`` headers, all objects with ``WWW-Authenticate`` headers are " +"stored in the cache for future requests. However, the default behavior of " +"not caching objects with ``WWW-Authenticate`` headers is appropriate in " +"most cases. Only configure Traffic Server to ignore server ``WWW-" +"Authenticate`` headers if you are knowledgeable about HTTP 1.1." 
+msgstr "" +"オリジンサーバーの ``WWW-Authenticate`` ヘッダーを無視するように Traffic " +"Server を設定した場合、 ``WWW-Authenticate`` ヘッダーを持つ全てのオブジェク" +"トは次のリクエストの為にキャッシュに保存されます。しかし、 ``WWW-" +"Authenticate`` ヘッダーを持つオブジェクトをキャッシュしないデフォルトの振る" +"舞いは多くの場合に適切です。 ``WWW-Authenticate`` ヘッダーを無視するように " +"Traffic Server を設定するのは HTTP 1.1 に精通してる場合にだけにしてくださ" +"い。" + +#: ../../admin/http-proxy-caching.en.rst:552 +msgid "" +"To configure Traffic Server to ignore server ``WWW-Authenticate`` headers" +msgstr "" +"``WWW-Authenticate`` ヘッダーを無視するように Traffic Server を設定するには" + +#: ../../admin/http-proxy-caching.en.rst:555 +msgid "" +"Edit the variable :ts:cv:`proxy.config.http.cache.ignore_authentication`" +msgstr "" +":ts:cv:`proxy.config.http.cache.ignore_authentication` 変数を変更してくださ" +"い。" + +#: ../../admin/http-proxy-caching.en.rst:561 +msgid "Configuration Directives" +msgstr "設定ディレクティブ" + +#: ../../admin/http-proxy-caching.en.rst:563 +msgid "" +"In addition to client and origin server directives, Traffic Server responds " +"to directives you specify through configuration options and files." +msgstr "" +"クライアントやオリジンサーバーのディレクティブに加えて、Traffic Server は設" +"定オプションやファイルを通じて設定したディレクティブにも反応します。" + +#: ../../admin/http-proxy-caching.en.rst:567 +msgid "You can configure Traffic Server to do the following:" +msgstr "次のように Traffic Server を設定することができます。" + +#: ../../admin/http-proxy-caching.en.rst:569 +msgid "" +"*Not* cache any HTTP objects (refer to `Disabling HTTP Object Caching`_)." +msgstr "" +"どんな HTTP オブジェクトもキャッシュ *しない* ( `HTTP オブジェクトキャッシュ" +"の無効化`_ 参照)" + +#: ../../admin/http-proxy-caching.en.rst:570 +msgid "" +"Cache **dynamic content** - that is, objects with URLs that end in ``.asp`` " +"or contain a question mark (``?``), semicolon (**``;``**), or **``cgi``**. " +"For more information, refer to `Caching Dynamic Content`_." 
+msgstr "" +"**動的コンテンツ** をキャッシュする - ``.asp`` で終わったり、クエスチョン" +"マーク (``?``)、セミコロン (``;``) や ``cgi`` を含んでいたりする URL のオブ" +"ジェクト。より詳しくは `動的コンテンツのキャッシュ`_ を参照してください。" + +#: ../../admin/http-proxy-caching.en.rst:573 +msgid "" +"Cache objects served in response to the ``Cookie:`` header (refer to " +"`Caching Cookied Objects`_." +msgstr "" +"``Cookie:`` ヘッダーに対して返されるオブジェクトをキャッシュする ( `クッキー" +"オブジェクトのキャッシュ`_ 参照)" + +#: ../../admin/http-proxy-caching.en.rst:575 +msgid "Observe ``never-cache`` rules in the :file:`cache.config` file." +msgstr ":file:`cache.config` ファイルの ``never-cache`` ルールに従う" + +#: ../../admin/http-proxy-caching.en.rst:578 +msgid "Disabling HTTP Object Caching" +msgstr "HTTP オブジェクトキャッシュの無効化" + +#: ../../admin/http-proxy-caching.en.rst:580 +msgid "" +"By default, Traffic Server caches all HTTP objects except those for which " +"you have set ``never-cache`` as :ref:`action rules ` in the :file:`cache.config` file. You can disable HTTP object " +"caching so that all HTTP objects are served directly from the origin server " +"and never cached, as detailed below." +msgstr "" +"デフォルトでは Traffic Server は :file:`cache.config` ファイルに設定した " +"``never-cache`` :ref:`アクションルール ` を除く" +"全ての HTTP オブジェクトをキャッシュします。後述するように HTTP オブジェク" +"トがオリジンサーバーから直接配信され、決してキャッシュされな いように HTTP " +"オブジェクトのキャッシュを無効化することができます。" + +#: ../../admin/http-proxy-caching.en.rst:586 +msgid "To disable HTTP object caching manually" +msgstr "HTTP オブジェクトを手動で無効化するには" + +#: ../../admin/http-proxy-caching.en.rst:588 +msgid "Set the variable :ts:cv:`proxy.config.http.enabled` to ``0``." +msgstr ":ts:cv:`proxy.config.http.enabled` 変数に ``0`` を設定してください。." + +#: ../../admin/http-proxy-caching.en.rst:594 +msgid "Caching Dynamic Content" +msgstr "動的コンテンツのキャッシュ" + +#: ../../admin/http-proxy-caching.en.rst:596 +msgid "" +"A URL is considered **dynamic** if it ends in **``.asp``** or contains a " +"question mark (``?``), a semicolon (``;``), or ``cgi``. By default, Traffic " +"Server caches dynamic content. 
You can configure the system to ignore " +"dyanamic looking content, although this is recommended only if the content " +"is *truely* dyanamic, but fails to advertise so with appropriate ``Cache-" +"Control`` headers." +msgstr "" +"``.asp`` で終わったり、クエスチョンマーク (``?``)、セミコロン (``;``) や " +"``cgi`` を含んでいたりする URL は **動的** であると考えられます。デフォルト" +"では Traffic Server は動的コンテンツをキャッシュします。コンテンツが *本当に" +"* 動的である場合にだけ推奨されますが、適切な ``Cache-Control`` ヘッダーに" +"よって伝えることができないとき、動的だと思われるコンテンツを無視するようにシ" +"ステムを設定することができます。" + +#: ../../admin/http-proxy-caching.en.rst:603 +msgid "" +"To configure Traffic Server's cache behaviour in regard to dynamic content" +msgstr "動的コンテンツに配慮した Traffic Server の振る舞いを設定するには" + +#: ../../admin/http-proxy-caching.en.rst:608 +msgid ":ts:cv:`proxy.config.http.cache.cache_urls_that_look_dynamic`" +msgstr ":ts:cv:`proxy.config.http.cache.cache_urls_that_look_dynamic`" + +#: ../../admin/http-proxy-caching.en.rst:614 +msgid "Caching Cookied Objects" +msgstr "クッキーオブジェクトのキャッシュ" + +#: ../../admin/http-proxy-caching.en.rst:618 +msgid "" +"By default, Traffic Server caches objects served in response to requests " +"that contain cookies. This is true for all types of objects except for " +"text. Traffic Server does not cache cookied text content because object " +"headers are stored along with the object, and personalized cookie header " +"values could be saved with the object. With non-text objects, it is " +"unlikely that personalized headers are delivered or used." 
+msgstr "" +"デフォルトではクッキーを含むリクエストに対するレスポンスとして配信されたオブ" +"ジェクトを Traffic Server はキャッシュします。これはテキストを除いた全てのタ" +"イプのオブジェクトについても同じです。Traffic Server はクッキーをもつテキス" +"トコンテンツをキャッシュしません。それはオブジェクトのヘッダーはオブジェクト" +"共に保存され、個人的なクッキーヘッダーの値もオブジェクトと共に保存されるため" +"です。テキストではないオブジェクトの場合、個人的なヘッダーは配信されたり使わ" +"れたりしません。" + +#: ../../admin/http-proxy-caching.en.rst:625 +msgid "You can reconfigure Traffic Server to:" +msgstr "次のように Traffic Server を設定し直すことができます。" + +#: ../../admin/http-proxy-caching.en.rst:627 +msgid "*Not* cache cookied content of any type." +msgstr "クッキーを含む全てのコンテンツをキャッシュ *しない*" + +#: ../../admin/http-proxy-caching.en.rst:628 +msgid "Cache cookied content that is of image type only." +msgstr "クッキーを含む画像のみキャッシュする" + +#: ../../admin/http-proxy-caching.en.rst:629 +msgid "Cache all cookied content regardless of type." +msgstr "タイプを考慮せずクッキーを含む全てのコンテンツをキャッシュする" + +#: ../../admin/http-proxy-caching.en.rst:631 +msgid "To configure how Traffic Server caches cookied content" +msgstr "" +"クッキーを含むコンテンツをどのようにキャッシュするか Traffic Server を設定す" +"るには" + +#: ../../admin/http-proxy-caching.en.rst:633 +msgid "" +"Edit the variable :ts:cv:`proxy.config.http.cache." +"cache_responses_to_cookies`" +msgstr "" +":ts:cv:`proxy.config.http.cache.cache_responses_to_cookies` 変数を変更してく" +"ださい。" + +#: ../../admin/http-proxy-caching.en.rst:639 +msgid "Forcing Object Caching" +msgstr "オブジェクトの強制キャッシュ" + +#: ../../admin/http-proxy-caching.en.rst:641 +msgid "" +"You can force Traffic Server to cache specific URLs (including dynamic " +"URLs) for a specified duration, regardless of ``Cache-Control`` response " +"headers." 
+msgstr "" +"``Cache-Control`` レスポンスヘッダーを無視して、特定の期間に特定の URL (動" +"的 URL も含む) をキャッシュすることを Traffic Server に強制することができま" +"す。" + +#: ../../admin/http-proxy-caching.en.rst:645 +msgid "To force document caching" +msgstr "ドキュメントを強制的にキャッシュするには" + +#: ../../admin/http-proxy-caching.en.rst:647 +msgid "" +"Add a rule for each URL you want Traffic Server to pin to the cache :file:" +"`cache.config`::" +msgstr "" +"Traffic Server にキャッシュに留めさせたい URL 毎に :file:`cache.config` に" +"ルールを追加してください。" + +#: ../../admin/http-proxy-caching.en.rst:656 +msgid "Caching HTTP Alternates" +msgstr "HTTP オブジェクトの代替のキャッシュ" + +#: ../../admin/http-proxy-caching.en.rst:658 +msgid "" +"Some origin servers answer requests to the same URL with a variety of " +"objects. The content of these objects can vary widely, according to whether " +"a server delivers content for different languages, targets different " +"browsers with different presentation styles, or provides different document " +"formats (HTML, XML). Different versions of the same object are termed " +"**alternates** and are cached by Traffic Server based on ``Vary`` response " +"headers. You can specify additional request and response headers for " +"specific ``Content-Type``\\s that Traffic Server will identify as " +"alternates for caching. You can also limit the number of alternate versions " +"of an object allowed in the cache." 
+msgstr "" +"いくつかの同一の URL へ複数のオブジェクトを回答するオリジンサーバーもありま" +"す。これらのオブジェクトのコンテンツはサーバーが異なる言語ごとにコンテンツを" +"配信したり、異なるブラウザ毎にプレゼンテーションスタイルを用意していたり、異" +"なるドキュメントフォーマット(HTML, XML) を提供しているか等により、多岐にわた" +"ります。同一オブジェクトの異なるバージョンは **代替** と呼ばれ、``Vary`` レ" +"スポンスヘッダーに基づいて Traffic Server にキャッシュされます。Traffic " +"Server がキャッシュする代替を判別する特別な ``Content-Type`` をリクエストや" +"レスポンスヘッダに追加することができます。キャッシュする代替バージョンの数を" +"制限することもできます。" + +#: ../../admin/http-proxy-caching.en.rst:670 +msgid "Configuring How Traffic Server Caches Alternates" +msgstr "Traffic Server がキャッシュする代替の設定" + +#: ../../admin/http-proxy-caching.en.rst:672 +msgid "" +"To configure how Traffic Server caches alternates, follow the steps below" +msgstr "" +"Traffic Server が代替をキャッシュするように設定するには、次のステップに従っ" +"てください。" + +#: ../../admin/http-proxy-caching.en.rst:677 +msgid ":ts:cv:`proxy.config.http.cache.enable_default_vary_headers`" +msgstr ":ts:cv:`proxy.config.http.cache.enable_default_vary_headers`" + +#: ../../admin/http-proxy-caching.en.rst:678 +msgid ":ts:cv:`proxy.config.http.cache.vary_default_text`" +msgstr ":ts:cv:`proxy.config.http.cache.vary_default_text`" + +#: ../../admin/http-proxy-caching.en.rst:679 +msgid ":ts:cv:`proxy.config.http.cache.vary_default_images`" +msgstr ":ts:cv:`proxy.config.http.cache.vary_default_images`" + +#: ../../admin/http-proxy-caching.en.rst:680 +msgid ":ts:cv:`proxy.config.http.cache.vary_default_other`" +msgstr ":ts:cv:`proxy.config.http.cache.vary_default_other`" + +#: ../../admin/http-proxy-caching.en.rst:687 +msgid "" +"If you specify ``Cookie`` as the header field on which to vary in the above " +"variables, make sure that the variable :ts:cv:`proxy.config.http.cache." +"cache_responses_to_cookies` is set appropriately." 
+msgstr "" +"上の変数に ``Cookie`` を切り替えるためのヘッダーフィールドとして指定した場" +"合、 :ts:cv:`proxy.config.http.cache.cache_responses_to_cookies` が適切に" +"セットされていることを確認して下さい。" + +#: ../../admin/http-proxy-caching.en.rst:693 +msgid "Limiting the Number of Alternates for an Object" +msgstr "オブジェクトの代替数の制限" + +#: ../../admin/http-proxy-caching.en.rst:695 +msgid "" +"You can limit the number of alternates Traffic Server can cache per object " +"(the default is 3)." +msgstr "" +"Traffic Server がオブジェクト毎にキャッシュする代替数を制限することができま" +"す。(デフォルトは 3 です)" + +#: ../../admin/http-proxy-caching.en.rst:700 +msgid "" +"Large numbers of alternates can affect Traffic Server cache performance " +"because all alternates have the same URL. Although Traffic Server can look " +"up the URL in the index very quickly, it must scan sequentially through " +"available alternates in the object store." +msgstr "" +"全ての代替は同一の URL を持つため、代替の数が多いと Traffic Server のキャッ" +"シュパフォーマンスに影響を与えるかもしれません。Traffic Server はインデック" +"ス中の URL をとても高速に検索しますが、キャッシュストアの中に使用可能な代替" +"があるかはシーケンシャルにスキャンしなければなりません。" + +#: ../../admin/http-proxy-caching.en.rst:705 +msgid "To limit the number of alternates" +msgstr "代替の数を制限するには" + +#: ../../admin/http-proxy-caching.en.rst:707 +msgid "Edit the variable :ts:cv:`proxy.config.cache.limits.http.max_alts`" +msgstr "" +":ts:cv:`proxy.config.cache.limits.http.max_alts` 変数を変更してください。" + +#: ../../admin/http-proxy-caching.en.rst:714 +msgid "Using Congestion Control" +msgstr "輻輳制御" + +#: ../../admin/http-proxy-caching.en.rst:716 +msgid "" +"The **Congestion Control** option enables you to configure Traffic Server " +"to stop forwarding HTTP requests to origin servers when they become " +"congested. Traffic Server then sends the client a message to retry the " +"congested origin server later." 
+msgstr "" +"**輻輳制御** オプションはオリジンサーバーが混雑しているときに Traffic " +"Server が HTTP リクエストを転送することを止めることを可能にします。Traffic " +"Server はその後、混雑してるオリジンサーバーに後でリトライするメッセージをク" +"ライアントに送ります。" + +#: ../../admin/http-proxy-caching.en.rst:721 +msgid "" +"To use the **Congestion Control** option, you must perform the following " +"tasks:" +msgstr "**輻輳制御** オプションを使うには、次のタスクを実行してください。" + +#: ../../admin/http-proxy-caching.en.rst:724 +msgid "" +"Set the variable :ts:cv:`proxy.config.http.congestion_control.enabled` to " +"``1``" +msgstr "" +":ts:cv:`proxy.config.http.congestion_control.enabled` 変数を ``1`` にセット" +"して下さい。" + +#: ../../admin/http-proxy-caching.en.rst:726 +msgid "Create rules in the :file:`congestion.config` file to specify:" +msgstr "" +"ルールを作成して :file:`congestion.config` ファイルに次のように記述してくだ" +"さい。" + +#: ../../admin/http-proxy-caching.en.rst:727 +msgid "which origin servers Traffic Server tracks for congestion" +msgstr "混雑しているか Traffic Server が追跡するオリジンサーバー" + +#: ../../admin/http-proxy-caching.en.rst:728 +msgid "" +"the timeouts Traffic Server uses, depending on whether a server is congested" +msgstr "" +"サーバーが混雑していると判定するために Traffic Sever が使用するタイムアウト" +"値" + +#: ../../admin/http-proxy-caching.en.rst:730 +msgid "" +"the page Traffic Server sends to the client when a server becomes congested" +msgstr "サーバー混雑時に Traffic Server がクライアントに送るページ" + +#: ../../admin/http-proxy-caching.en.rst:732 +msgid "" +"if Traffic Server tracks the origin servers per IP address or per hostname" +msgstr "" +"Traffic Server が IP アドレス毎に追跡するか、ホストネーム毎に追跡するかどう" +"か" + +#: ../../admin/http-proxy-caching.en.rst:741 +msgid "Using Transaction Buffering Control" +msgstr "トランザクションバッファリング制御" + +#: ../../admin/http-proxy-caching.en.rst:743 +msgid "" +"By default I/O operations are run at full speed, as fast as either Traffic " +"Server, the network, or the cache can go. This can be problematic for large " +"objects if the client side connection is significantly slower. 
In such " +"cases the content will be buffered in ram while waiting to be sent to the " +"client. This could potentially also happen for ``POST`` requests if the " +"client connection is fast and the origin server connection slow. If very " +"large objects are being used this can cause the memory usage of Traffic " +"Server to become `very large `_." +msgstr "" +"デフォルトでは I/O オペレーションは Traffic Server やネットワークやキャッ" +"シュが実行できる限り速くフルスピードで実行されます。これはクライアント側のコ" +"ネクションが遅い場合に、大きなオブジェクトにとって問題になる可能性がありま" +"す。このような場合、クライアントに送られるのを待っている間、コンテンツはメモ" +"リにバッファーされます。これはクライアントのコネクションが早く、オリジンサー" +"バーのコネクションが遅い場合に ``POST`` リクエストでも発生し得ます。とても大" +"きなオブジェクトが使われているとこれは Traffic Server のメモリ使用量がとても" +"大きくなる原因になり得ます。 `very large `_" + +#: ../../admin/http-proxy-caching.en.rst:750 +msgid "" +"This problem can be ameloriated by controlling the amount of buffer space " +"used by a transaction. A high water and low water mark are set in terms of " +"bytes used by the transaction. If the buffer space in use exceeds the high " +"water mark, the connection is throttled to prevent additional external data " +"from arriving. Internal operations continue to proceed at full speed until " +"the buffer space in use drops below the low water mark and external data I/" +"O is re-enabled." +msgstr "" +"この問題はトランザクションが使うバッファースペースの量をコントロールすること" +"で改善されるかもしれません。high water と low water マークはトランザクション" +"で使用されるバイトの幅をセットします。バッファースペースが high water マーク" +"を越えた場合、コネクションは追加のデータがやってくることを止める為に減速しま" +"す。使われているバッファースペースが low water マークを下回り、外部のデータ " +"I/O が再開されるまで内側のオペレーションはフルスピードで進み続けます。" + +#: ../../admin/http-proxy-caching.en.rst:755 +msgid "" +"Although this is intended primarily to limit the memory usage of Traffic " +"Server it can also serve as a crude rate limiter by setting a buffer limit " +"and then throttling the client side connection either externally or via a " +"transform. This will cause the connection to the origin server to be " +"limited to roughly the client side connection speed." 
+msgstr "" +"主に Traffic Server のメモリ使用量を制限することを意図していますが、これはま" +"た大雑把なレートリミッターも提供します。これはバッファーリミットの設定と、外" +"部やトランスフォームの影響により、クライアント側のコネクションを減速させるこ" +"とによります。これはオリジンサーバーへのコネクションがクライアント側のコネク" +"ションスピードにより大まかに制限されることをもたします。" + +#: ../../admin/http-proxy-caching.en.rst:759 +msgid "" +"Traffic Server does network I/O in large chunks (32K or so) and therefore " +"the granularity of transaction buffering control is limited to a similar " +"precision." +msgstr "" +"Traffic Server はネットワーク I/O をラージチャンク(32K など) で行います。" +"よって、トランザクションバッファリングコントロールの粒度は同じような値に制限" +"されています。" + +#: ../../admin/http-proxy-caching.en.rst:762 +msgid "" +"The buffer size calculations include all elements in the transaction, " +"including any buffers associated with :ref:`transform plugins `." +msgstr "" +"バッファーサイズの計算はトランザクションの全ての要素を含んでいます。これは :" +"ref:`transform plugins ` に紐づけられているどんなバッ" +"ファーも含みます。" + +#: ../../admin/http-proxy-caching.en.rst:764 +msgid "" +"Transaction buffering control can be enabled globally by using " +"configuration variables or by :c:func:`TSHttpTxnConfigIntSet` in a plugin." 
+msgstr "" +"トランザクションバッファーコントロールは設定変数を使ってグローバルに有効化す" +"ることもできます。また :c:func:`TSHttpTxnConfigIntSet` を使用してプラグイン" +"の中で有効化することもできます。" + +#: ../../admin/http-proxy-caching.en.rst:767 +msgid "Value" +msgstr "値" + +#: ../../admin/http-proxy-caching.en.rst:767 +msgid "Variable" +msgstr "変数" + +#: ../../admin/http-proxy-caching.en.rst:767 +msgid "`TSHttpTxnConfigIntSet` key" +msgstr "`TSHttpTxnConfigIntSet` キー" + +#: ../../admin/http-proxy-caching.en.rst:769 +msgid "Enable buffering" +msgstr "バッファーの有効化" + +#: ../../admin/http-proxy-caching.en.rst:769 +msgid ":ts:cv:`proxy.config.http.flow_control.enabled`" +msgstr ":ts:cv:`proxy.config.http.flow_control.enabled`" + +#: ../../admin/http-proxy-caching.en.rst:769 +msgid "`TS_CONFIG_HTTP_FLOW_CONTROL_ENABLED`" +msgstr "`TS_CONFIG_HTTP_FLOW_CONTROL_ENABLED`" + +#: ../../admin/http-proxy-caching.en.rst:770 +msgid "Set high water" +msgstr "high water の設定" + +#: ../../admin/http-proxy-caching.en.rst:770 +msgid ":ts:cv:`proxy.config.http.flow_control.high_water`" +msgstr ":ts:cv:`proxy.config.http.flow_control.high_water`" + +#: ../../admin/http-proxy-caching.en.rst:770 +msgid "`TS_CONFIG_HTTP_FLOW_CONTROL_HIGH_WATER`" +msgstr "`TS_CONFIG_HTTP_FLOW_CONTROL_HIGH_WATER`" + +#: ../../admin/http-proxy-caching.en.rst:771 +msgid "Set low water" +msgstr "low water の設定" + +#: ../../admin/http-proxy-caching.en.rst:771 +msgid ":ts:cv:`proxy.config.http.flow_control.low_water`" +msgstr ":ts:cv:`proxy.config.http.flow_control.low_water`" + +#: ../../admin/http-proxy-caching.en.rst:771 +msgid "`TS_CONFIG_HTTP_FLOW_CONTROL_LOW_WATER`" +msgstr "`TS_CONFIG_HTTP_FLOW_CONTROL_LOW_WATER`" + +#: ../../admin/http-proxy-caching.en.rst:774 +msgid "" +"Be careful to always have the low water mark equal or less than the high " +"water mark. If you set only one, the other will be set to the same value." 
+msgstr "" +"low water マークは high water マークと常に同じか少ないことに注意してくださ" +"い。一方だけを設定すると、もう一方は同じ値に設定されます。" + +#: ../../admin/http-proxy-caching.en.rst:777 +msgid "" +"If using c:func:`TSHttpTxnConfigIntSet`, it must be called no later than " +"`TS_HTTP_READ_RESPONSE_HDR_HOOK`." +msgstr "" +"c:func:`TSHttpTxnConfigIntSet` を使う場合、`TS_HTTP_READ_RESPONSE_HDR_HOOK` " +"のすぐ後ろで呼ばれなければなりません。" + +#: ../../admin/http-proxy-caching.en.rst:782 +msgid "Reducing Origin Server Requests (Avoiding the Thundering Herd)" +msgstr "オリジンサーバーへのリクエストの削減(Thundering Herd 問題を避ける)" + +#: ../../admin/http-proxy-caching.en.rst:784 +msgid "" +"When an object can not be served from cache, the request will be proxied to " +"the origin server. For a popular object, this can result in many near " +"simultaneous requests to the origin server, potentially overwhelming it or " +"associated resources. There are several features in Traffic Server that can " +"be used to avoid this scenario." +msgstr "" +"オブジェクトがキャッシュから配信されない場合、リクエストはオリジンサーバーに" +"プロキシーされます。ポピュラーなオブジェクトにとって、これはオリジンサーバー" +"へ多くの同じ様なリクエストを送り、可能性としては計り知れない程の関連したリ" +"ソースを使うかもしれません。Traffic Server にはこのシナリオを避けられるいく" +"つかの機能があります。" + +#: ../../admin/http-proxy-caching.en.rst:789 +msgid "Read While Writer" +msgstr "Read While Writer" + +#: ../../admin/http-proxy-caching.en.rst:790 +msgid "" +"When Traffic Server goes to fetch something from origin, and upon receiving " +"the response, any number of clients can be allowed to start serving the " +"partially filled cache object once background_fill_completed_threshold % of " +"the object has been received. The difference is that Squid allows this as " +"soon as it goes to origin, whereas ATS can not do it until we get the " +"complete response header. The reason for this is that we make no " +"distinction between cache refresh, and cold cache, so we have no way to " +"know if a response is going to be cacheable, and therefore allow read-while-" +"writer functionality." 
+msgstr "" +"Traffic Server がオリジンからオブジェクトをフェッチしに行くとき、そしてレス" +"ポンスを受け取るとき、受け取ったオブジェクトの " +"background_fill_completed_threshold % が満たされた部分的キャッシュオブジェク" +"トを配信することがどんな数のクライアントにも許されています。違いは Squid は" +"オリジンに行ったすぐ後で許可していますが、ATS は完全なレスポンスヘッダーを受" +"け取るまでできないことです。この理由はキャッシュリフレッシュとコールドキャッ" +"シュの間で差異を作りたくないからです。レスポンスがキャッシュ可能なものか知る" +"方法がないので、read-while-writer 機能を許可しています。" + +#: ../../admin/http-proxy-caching.en.rst:792 +msgid "The configurations necessary to enable this in ATS are:" +msgstr "ATS でこれを有効化する為に必要な設定は" + +#: ../../admin/http-proxy-caching.en.rst:794 +msgid "CONFIG :ts:cv:`proxy.config.cache.enable_read_while_writer` ``INT 1``" +msgstr "CONFIG :ts:cv:`proxy.config.cache.enable_read_while_writer` ``INT 1``" + +#: ../../admin/http-proxy-caching.en.rst:795 +msgid "" +"CONFIG :ts:cv:`proxy.config.http.background_fill_active_timeout` ``INT 0``" +msgstr "" +"CONFIG :ts:cv:`proxy.config.http.background_fill_active_timeout` ``INT 0``" + +#: ../../admin/http-proxy-caching.en.rst:796 +msgid "" +"CONFIG :ts:cv:`proxy.config.http.background_fill_completed_threshold` " +"``FLOAT 0.000000``" +msgstr "" +"CONFIG :ts:cv:`proxy.config.http.background_fill_completed_threshold` " +"``FLOAT 0.000000``" + +#: ../../admin/http-proxy-caching.en.rst:797 +msgid "CONFIG :ts:cv:`proxy.config.cache.max_doc_size` ``INT 0``" +msgstr "CONFIG :ts:cv:`proxy.config.cache.max_doc_size` ``INT 0``" + +#: ../../admin/http-proxy-caching.en.rst:799 +msgid "All four configurations are required, for the following reasons:" +msgstr "次の理由により、4つ全ての設定が必要です。" + +#: ../../admin/http-proxy-caching.en.rst:801 +msgid "enable_read_while_writer turns the feature on. It's off (0) by default" +msgstr "" +"enable_read_while_writer は機能を on にします。デフォルトでは off (0) です。" + +#: ../../admin/http-proxy-caching.en.rst:802 +msgid "" +"The background fill feature should be allowed to kick in for every possible " +"request. 
This is necessary, in case the writer (\"first client session\") " +"goes away, someone needs to take over the session. Hence, you should set " +"the background fill timeouts and threshold to zero; this assures they never " +"times out and always is allowed to kick in." +msgstr "" +"バックグラウンドフィル機能は全てのあり得るリクエストでキックされることが許可" +"されているべきです。これは重要で、writer (\"最初のクライアントセッション\") " +"が出て行った場合、誰かがセッションを引継ぐ必要があります。したがって、バック" +"グラウンドフィルタイムアウトを設定し、境界点をゼロにするべきです。これは彼ら" +"を安心させます。タイムアウトせずに、キックインすることを常に許可されていま" +"す。" + +#: ../../admin/http-proxy-caching.en.rst:803 +msgid "" +"The proxy.config.cache.max_doc_size should be unlimited (set to 0), since " +"the object size may be unknown, and going over this limit would cause a " +"disconnect on the objects being served." +msgstr "" +"proxy.config.cache.max_doc_size は無制限(0)に設定されているべきです。オブ" +"ジェクトサイズは分からないので。この制限は配信されているオブジェクトのコネク" +"ションの切断の原因になるかもしれません。" + +#: ../../admin/http-proxy-caching.en.rst:805 +msgid "" +"Once all this enabled, you have something that is very close, but not quite " +"the same, as Squid's Collapsed Forwarding." +msgstr "" +"一度これら全てが有効化されると、Squid の Collapesd Forwarding にとても近いが" +"異なるものができます。" + +#: ../../admin/http-proxy-caching.en.rst:812 +msgid "Fuzzy Revalidation" +msgstr "Fuzzy Revalidation" + +#: ../../admin/http-proxy-caching.en.rst:813 +msgid "" +"Traffic Server can be set to attempt to revalidate an object before it " +"becomes stale in cache. 
:file:`records.config` contains the settings:" +msgstr "" +"Traffic Server はキャッシュが新鮮でなくなる前に、オブジェクトの再検証を試み" +"るように設定することもできます。:file:`records.config` に次の設定がありま" +"す。" + +#: ../../admin/http-proxy-caching.en.rst:815 +msgid "CONFIG :ts:cv:`proxy.config.http.cache.fuzz.time` ``INT 240``" +msgstr "CONFIG :ts:cv:`proxy.config.http.cache.fuzz.time` ``INT 240``" + +#: ../../admin/http-proxy-caching.en.rst:816 +msgid "CONFIG :ts:cv:`proxy.config.http.cache.fuzz.min_time` ``INT 0``" +msgstr "CONFIG :ts:cv:`proxy.config.http.cache.fuzz.min_time` ``INT 0``" + +#: ../../admin/http-proxy-caching.en.rst:817 +msgid "" +"CONFIG :ts:cv:`proxy.config.http.cache.fuzz.probability` ``FLOAT 0.005``" +msgstr "" +"CONFIG :ts:cv:`proxy.config.http.cache.fuzz.probability` ``FLOAT 0.005``" + +#: ../../admin/http-proxy-caching.en.rst:819 +msgid "" +"For every request for an object that occurs \"fuzz.time\" before (in the " +"example above, 240 seconds) the object is set to become stale, there is a " +"small chance (fuzz.probability == 0.5%) that the request will trigger a " +"revalidation request to the origin. For objects getting a few requests per " +"second, this would likely not trigger, but then this feature is not " +"necessary anyways since odds are only 1 or a small number of connections " +"would hit origin upon objects going stale. The defaults are a good " +"compromise, for objects getting roughly 4 requests / second or more, it's " +"virtually guaranteed to trigger a revalidate event within the 240s. These " +"configs are also overridable per remap rule or via a plugin, so can be " +"adjusted per request if necessary." 
+msgstr "" +"新鮮でないとセットされる前に \"fuzz.time\" (上の例では 240 秒) を発生させる" +"すべてのオブジェクトへのリクエストには、オリジンへの再検証リクエストを引き起" +"こすわずかな可能性 ( fuzz.probability == 0.5% ) があります。秒間 2,3 リクエ" +"スト数しかないオブジェクトでは、これはまず引き起こされません。また、オッズが" +"たったの 1 やオブジェクトが新鮮ではなくなり、コネクションがオリジンをヒット" +"する数が少ないとき、この機能は必要ではありません。デフォルト値はいい妥協点で" +"あり、おおよそ秒間 4 リクエスト以上のオブジェクトにとって、240 秒以内に再検" +"証が引き起こされることが実質的に保証されます。これらの設定は remap ルール毎" +"やプラグインによって上書きすることもできるので、必要であればリクエスト毎に調" +"整することもできます。" + +#: ../../admin/http-proxy-caching.en.rst:822 +msgid "" +"Note that if the revalidation occurs, the requested object is no longer " +"available to be served from cache. Subsequent requests for that object " +"will be proxied to the origin." +msgstr "" +"再検証が起きた場合、リクエストされたオブジェクトはもはやキャッシュから配信す" +"ることはできないことに注意してください。続いて起きるオブジェクトへのリクエス" +"トはオリジンサーバーにプロキシーされます。" + +#: ../../admin/http-proxy-caching.en.rst:825 +msgid "" +"Finally, the fuzz.min_time is there to be able to handle requests with a " +"TTL less than fuzz.time – it allows for different times to evaluate the " +"probability of revalidation for small TTLs and big TTLs. Objects with small " +"TTLs will start \"rolling the revalidation dice\" near the fuzz.min_time, " +"while objects with large TTLs would start at fuzz.time. A logarithmic like " +"function between determines the revalidation evaluation start time (which " +"will be between fuzz.min_time and fuzz.time). As the object gets closer to " +"expiring, the window start becomes more likely. By default this setting is " +"not enabled, but should be enabled anytime you have objects with small " +"TTLs. Note that this option predates overridable configurations, so you can " +"achieve something similar with a plugin or remap.config conf_remap.so " +"configs." +msgstr "" +"最後に fuzz.min_time は fuzz.time より短い TTL のリクエストを制御することを" +"可能にするためにあります - それは小さい TTL と大きい TTL で再検証の確率を評" +"価する時間が異なることを許容します。TTL の小さなオブジェクトは fuzz." 
+"min_time 付近で \"再検証のサイコロを転がす\" ことを始めます。一方、大きな " +"TTL のオブジェクトは fuzz.time から始めます。対数のような関数が再検証査定を" +"始める時間を決定します。(その値は fuzz.min_time と fuzz.time の間でしょう) " +"期限切れに近いオブジェクトでは、期間の始まりはより可能性が高くなります。デ" +"フォルトではこの設定は有効化されていません。しかし、TTL の小さなオブジェクト" +"がある場合、いつでも有効化するべきです。このオプションは設定を上書きする前に" +"起きることに注意してください。よって、プラグインや remap.config conf_remap." +"so configs のようなものを作ることができます。" + +#: ../../admin/http-proxy-caching.en.rst:827 +msgid "" +"These configurations are similar to Squid's refresh_stale_hit configuration " +"option." +msgstr "" +"これらの設定は Squid の refresh_stale_hit 設定オプションに似ています。" + +#: ../../admin/http-proxy-caching.en.rst:831 +msgid "Open Read Retry Timeout" +msgstr "Open Read Retry Timeout" + +#: ../../admin/http-proxy-caching.en.rst:833 +msgid "" +"The open read retry configurations attempt to reduce the number of " +"concurrent requests to the origin for a given object. While an object is " +"being fetched from the origin server, subsequent requests would wait " +"open_read_retry_time milliseconds before checking if the object can be " +"served from cache. If the object is still being fetched, the subsequent " +"requests will retry max_open_read_retries times. Thus, subsequent requests " +"may wait a total of (max_open_read_retries x open_read_retry_time) " +"milliseconds before establishing an origin connection of its own. For " +"instance, if they are set to 5 and 10 respectively, connections will wait " +"up to 50ms for a response to come back from origin from a previous request, " +"until this request is allowed through." 
+msgstr "" +"オープンリードリトライ設定は与えられたオブジェクトに対してオリジンサーバーへ" +"の並列リクエストの数を減らすことを試みています。あるオブジェクトがオリジン" +"サーバーからフェッチされている間、次のリクエストはオブジェクトがキャッシュか" +"ら配信できるかどうかを確認する前に open_read_retry_time ミリ秒待ちます。オブ" +"ジェクトが依然としてフェッチされている場合、次のリクエストは " +"max_open_read_retries 回リトライします。すると、次のリクエストはオリジンサー" +"バーへのコネクションを自分自身で確立する前に合計で (max_open_read_retries x " +"open_read_retry_time) ミリ秒待ちます。例えばそれぞれ 5 や 10 にセットされた" +"場合、このリクエストが許可されるまでコネクションは前回のリクエストがオリジン" +"からレスポンスが帰ってくる間 50ms 待ちます。" + +#: ../../admin/http-proxy-caching.en.rst:835 +msgid "" +"These settings are inappropriate when objects are uncacheable. In those " +"cases, requests for an object effectively become serialized. The subsequent " +"requests would await at least open_read_retry_time milliseconds before " +"being proxies to the origin." +msgstr "" +"これらの設定はオブジェクトがキャッシュ不可能な場合、適切ではありません。これ" +"らの場合、オブジェクトへのリクエストは実際には直列になります。次のリクエスト" +"はオリジンにプロキシーされる前に少なくとも open_read_retry_time ミリ秒待たさ" +"れるでしょう。" + +#: ../../admin/http-proxy-caching.en.rst:837 +msgid "" +"Similarly, this setting should be used in conjunction with Read While " +"Writer for big (those that take longer than (max_open_read_retries x " +"open_read_retry_time) milliseconds to transfer) cacheable objects. Without " +"the read-while-writer settings enabled, while the initial fetch is ongoing, " +"not only would subsequent requests be delayed by the maximum time, but " +"also, those requests would result in another request to the origin server." +msgstr "" +"同様にこの設定は大きな (転送に (max_open_read_retries x " +"open_read_retry_time) ミリ秒以上かかる) キャッシュ可能なオブジェクトの Read " +"While Writer の続発の中では使われるべきです。 read-while-writer 設定を有効化" +"しないと、初回のフェッチが行われている間、次のリクエストが最大限遅れるだけで" +"はなく、結果としてオリジンサーバーへの他のリクエストを発生させます。" + +#: ../../admin/http-proxy-caching.en.rst:839 +msgid "" +"Since ATS now supports setting these settings per-request or remap rule, " +"you can configure this to be suitable for your setup much more easily." 
+msgstr "" +"ATS はリクエスト毎や remap ルールに設定することをサポートしているので、これ" +"はより簡単に適切に設定することができます。" + +#: ../../admin/http-proxy-caching.en.rst:841 +msgid "The configurations are (with defaults):" +msgstr "設定とそのデフォルト値は" + +#: ../../admin/http-proxy-caching.en.rst:843 +msgid "" +"CONFIG :ts:cv:`proxy.config.http.cache.max_open_read_retries` ``INT -1``" +msgstr "" +"CONFIG :ts:cv:`proxy.config.http.cache.max_open_read_retries` ``INT -1``" + +#: ../../admin/http-proxy-caching.en.rst:844 +msgid "" +"CONFIG :ts:cv:`proxy.config.http.cache.open_read_retry_time` ``INT 10``" +msgstr "" +"CONFIG :ts:cv:`proxy.config.http.cache.open_read_retry_time` ``INT 10``" + +#: ../../admin/http-proxy-caching.en.rst:846 +msgid "" +"The default means that the feature is disabled, and every connection is " +"allowed to go to origin instantly. When enabled, you will try " +"max_open_read_retries times, each with a open_read_retry_time timeout." +msgstr "" +"デフォルトはこの機能が無効化されていて、全てのコネクションはオリジンにすぐに" +"行くことを許可されていることを意味します。有効化した場合、" +"open_read_retry_time タイムアウト毎に max_open_read_retries 回試すでしょう。" diff --git a/doc/locale/ja/LC_MESSAGES/admin/index.en.po b/doc/locale/ja/LC_MESSAGES/admin/index.en.po new file mode 100644 index 00000000..d5b8b5cc --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/admin/index.en.po @@ -0,0 +1,643 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-11-08 17:57+0900\n" +"PO-Revision-Date: 2013-11-23 00:20+0900\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../admin/index.en.rst:4 +msgid "Administrators' Guide" +msgstr "管理者ガイド" + +#: ../../admin/index.en.rst:24 +msgid "" +"Apache Traffic Server™ speeds Internet access, enhances website " +"performance, and delivers unprecedented web hosting capabilities." +msgstr "" +"Apache Traffic Server™ はインターネットアクセスを加速させ、ウェブサイトのパ" +"フォーマンスを高め、かつて無いウェブホスティング性能を提供します。" + +#: ../../admin/index.en.rst:27 +msgid "This chapter discusses how:" +msgstr "この章は次のようなトピックについて書いてあります。" + +#: ../../admin/index.en.rst:29 +msgid "Contents:" +msgstr "内容:" + +#: ../../admin/index.en.rst:54 +msgid "What Is Apache Traffic Server?" +msgstr "Apache Traffic Server とは" + +#: ../../admin/index.en.rst:56 +msgid "" +"Global data networking has become part of everyday life: Internet users " +"request billions of documents and terabytes of data, on a daily basis, to " +"and from all parts of the world. Information is free, abundant, and " +"accessible. Unfortunately, global data networking can also be a nightmare " +"for IT professionals as they struggle with overloaded servers and congested " +"networks. It can be challenging to consistently and reliably accommodate " +"society’s growing data demands." 
+msgstr "" +"グローバルなデータネットワークの利用は日常生活の一部となりました。インター" +"ネットユーザーは日常生活の基盤の上で数10億ものドキュメントやテラバイトの" +"データを世界の隅から隅へリクエストします。不幸なことに、グローバルデータネッ" +"トワーキングは過負荷なサーバーや混雑したネットワークと格闘している IT 専門家" +"にとっては悪夢です。増え続けるデータ需要を絶えず、期待通りに動くように対応す" +"ることはチャレンジングなことです。" + +#: ../../admin/index.en.rst:64 +msgid "" +"Traffic Server is a high-performance web proxy cache that improves network " +"efficiency and performance by caching frequently-accessed information at " +"the edge of the network. This brings content physically closer to end " +"users, while enabling faster delivery and reduced bandwidth use. Traffic " +"Server is designed to improve content delivery for enterprises, Internet " +"service providers (ISPs), backbone providers, and large intranets by " +"maximizing existing and available bandwidth." +msgstr "" +"Traffic Server は高性能なウェブプロキシーキャッシュです。頻繁にアクセスされ" +"る情報をネットワークの端でキャッシュすることによりネットワーク効率とパフォー" +"マンスを改善します。これは物理的にエンドユーザーに近いコンテンツを届け、高速" +"な配信と帯域使用量の削減を可能とします。Traffic Server は商用のコンテンツ配" +"信やインターネットサービスプロバイダー( ISP )やバックボーンプロバイダーや巨" +"大なイントラネットを現行の利用可能な帯域を最大化することで改善するようにデザ" +"インされています。" + +#: ../../admin/index.en.rst:73 +msgid "Traffic Server Deployment Options" +msgstr "Traffic Server デプロイメントオプション" + +#: ../../admin/index.en.rst:75 +msgid "" +"To best suit your needs, Traffic Server can be deployed in several ways:" +msgstr "" +"必要に応じて、Traffic Server はいくつかの方法で配置することができます。" + +#: ../../admin/index.en.rst:77 +msgid "As a web proxy cache" +msgstr "ウェブプロキシーキャッシュとして" + +#: ../../admin/index.en.rst:78 +msgid "As a reverse proxy" +msgstr "リバースプロキシーとして" + +#: ../../admin/index.en.rst:79 +msgid "In a cache hierarchy" +msgstr "キャッシュ階層の一部として" + +#: ../../admin/index.en.rst:81 +msgid "" +"The following sections provide a summary of these Traffic Server deployment " +"options. Please keep in mind that with every of these options Traffic " +"Server can be run as a *single instance*, or as a *multi-node cluster*." 
+msgstr "" +"次のセクションではこれらの Traffic Server のデプロイメントオプションの概要を" +"説明します。これらのすべてのプションで Traffic Server は *シングルインスタン" +"ス* としても *マルチノードクラスター* としても動作することを覚えておいて下さ" +"い。" + +#: ../../admin/index.en.rst:87 +msgid "Traffic Server as a Web Proxy Cache" +msgstr "ウェブプロキシーキャッシュとしての Traffic Server" + +#: ../../admin/index.en.rst:89 +msgid "" +"As a web proxy cache, Traffic Server receives user requests for web content " +"as those requests travel to the destined web server (origin server). If " +"Traffic Server contains the requested content, then it serves the content " +"directly. If the requested content is not available from cache, then " +"Traffic Server acts as a proxy: it obtains the content from the origin " +"server on the user’s behalf and also keeps a copy to satisfy future " +"requests." +msgstr "" +"ウェブプロキシーキャッシュとして Traffic Server はウェブコンテンツへのユー" +"ザーリクエストを受け取り、宛先のウェブサーバー(オリジンサーバー)へ届けます。" +"Traffic Server がリクエストされたコンテンツを持っている場合、そのコンテンツ" +"を直接配信します。リクエストされたコンテンツがキャッシュから使えない場合、" +"Traffic Server はプロキシーとして振る舞います。つまり、コンテンツをユーザー" +"に代わってコンテンツを取得し、また将来のリクエストを満たすためにコピーを保持" +"します。" + +#: ../../admin/index.en.rst:97 +msgid "" +"Traffic Server provides explicit proxy caching, in which the user’s client " +"software must be configured to send requests directly to Traffic Server. " +"Explicit proxy caching is described in the :ref:`explicit-proxy-caching` " +"chapter." +msgstr "" +"Traffic Server は明確なプロキシーキャッシュを提供します。この場合ユーザーの" +"クライアントソフトウェアは Traffic Server に直接リクエストを送るように設定さ" +"れていなければなりません。明確なプロキシーキャッシュについては :ref:" +"`explicit-proxy-caching` の章で述べています。" + +#: ../../admin/index.en.rst:104 +msgid "Traffic Server as a Reverse Proxy" +msgstr "リバースプロキシーとしての Traffic Server" + +#: ../../admin/index.en.rst:106 +msgid "" +"As a reverse proxy, Traffic Server is configured to be the origin server to " +"which the user is trying to connect (typically, the origin server’s " +"advertised hostname resolves to Traffic Server, which acts as the real " +"origin server). 
The reverse proxy feature is also called server "
+"acceleration. Reverse proxy is described in more detail in :ref:`reverse-"
+"proxy-and-http-redirects`."
+msgstr ""
+"リバースプロキシーとして Traffic Server はユーザーが接続しようとするオリジン"
+"サーバーとして設定されています。(一般的に、オリジンサーバーとして公開された"
+"ホスト名は Traffic Server に解決され、実際のオリジンサーバーのように振る舞い"
+"ます。)リバースプロキシー機能はサーバーアクセラレーションとも呼ばれます。リ"
+"バースプロキシーは :ref:`reverse-proxy-and-http-redirects` で詳しく述べられ"
+"ています。"
+
+#: ../../admin/index.en.rst:113
+msgid "Traffic Server in a Cache Hierarchy"
+msgstr "キャッシュ階層での Traffic Server"
+
+#: ../../admin/index.en.rst:115
+msgid ""
+"Traffic Server can participate in flexible cache hierarchies, in which "
+"Internet requests not fulfilled from one cache are routed to other regional "
+"caches, thereby leveraging the contents and proximity of nearby caches. In "
+"a hierarchy of proxy servers, Traffic Server can act either as a parent or "
+"a child cache to other Traffic Server systems or to similar caching "
+"products."
+msgstr ""
+"Traffic Server は柔軟にキャッシュヒエラルキーに参加することができます。その"
+"中で1つのキャッシュからは満たされないインターネットリクエストは他の局地的な"
+"キャッシュに送られます。プロキシーサーバーの階層の中で Traffic Server は他の"
+"Traffic Server システムや似たキャッシングプロダクトの親や子として振る舞いま"
+"す。"
+
+#: ../../admin/index.en.rst:122
+msgid ""
+"Traffic Server supports ICP (Internet Cache Protocol) peering. Hierarchical "
+"caching is described in more detail in :ref:`hierarchical-caching`."
+msgstr ""
+"Traffic Server は ICP(Internet Cache Protocol) のピアリングをサポートしてい"
+"ます。階層的キャッシュは :ref:`hierarchical-caching` で詳しく述べられていま"
+"す。"
+
+#: ../../admin/index.en.rst:126
+msgid "Deployment Limitations"
+msgstr "Deployment Limitations"
+
+#: ../../admin/index.en.rst:128
+msgid ""
+"There's a number of deployment options that Traffic Server does not support "
+"right out of the box. 
Such funcionality may be implemented in a plugin, but "
+"in some cases Traffic Server's internal APIs or architectural restrictions "
+"won't make it easy:"
+msgstr ""
+"Traffic Server がサポートしていないデプロイメントオプションがいくつかありま"
+"す。そのような機能はプラグインで実装されていることもあります。またいくつかの"
+"場合 Traffic Server の内部的な API やアーキテクチャ的な制限が実装を難しくし"
+"ていることもあります。"
+
+#: ../../admin/index.en.rst:132
+msgid ""
+"Load Balancing - note that there is an experimental plugin for this: :ref:"
+"`balancer-plugin`."
+msgstr ""
+"ロードバランシング - このための実験的なプラグインがあることに注意してくださ"
+"い。 :ref:`balancer-plugin`"
+
+#: ../../admin/index.en.rst:135
+msgid "Traffic Server Components"
+msgstr "Traffic Server Components"
+
+#: ../../admin/index.en.rst:137
+msgid ""
+"Traffic Server consists of several components that work together to form a "
+"web proxy cache you can easily monitor and configure. These main components "
+"are described below."
+msgstr ""
+"Traffic Server は簡単に監視や設定が行えるウェブプロキシーキャッシュを構成す"
+"るために協力するいくつかのコンポーネントから成り立っています。これらのコン"
+"ポーネントについて次に述べます。"
+
+#: ../../admin/index.en.rst:142
+msgid "The Traffic Server Cache"
+msgstr "The Traffic Server Cache"
+
+#: ../../admin/index.en.rst:144
+msgid ""
+"The Traffic Server cache consists of a high-speed object database called "
+"the object store. The object store indexes objects according to URLs and "
+"associated headers. Using sophisticated object management, the object store "
+"can cache alternate versions of the same object (perhaps in a different "
+"language or encoding type). It can also efficiently store very small and "
+"very large objects, thereby minimizing wasted space. When the cache is "
+"full, Traffic Server removes stale data to ensure that the most requested "
+"objects are readily available and fresh." 
+msgstr "" +"Traffic Server キャッシュはオブジェクトストアと呼ばれるハイスピードオブジェ" +"クトデータベースによって構成されます。オブジェクトは URL と関連するヘッダー" +"に基づいたインデックスオブジェクトを保存します。洗練されたオブジェクト管理に" +"より、オブジェクトストアは同じオブジェクトの(言語やエンコーディングタイプな" +"どが)異なるバージョンをキャッシュすることができます。これは無駄なスペースを" +"最小化することによって、とても小さかったり、大きかったりするオブジェクトを効" +"率的に保存することもできます。キャッシュがいっぱいになった場合、Traffic " +"Server は最もリクエストされるオブジェクトがすぐに利用可能で新しい状態である" +"ことを保証するために、古いデータを削除します。" + +#: ../../admin/index.en.rst:153 +msgid "" +"Traffic Server is designed to tolerate total disk failures on any of the " +"cache disks. If the disk fails completely, then Traffic Server marks the " +"entire disk as corrupt and continues to use remaining disks. If all of the " +"cache disks fail, then Traffic Server switches to proxy-only mode. You can " +"partition the cache to reserve a certain amount of disk space for storing " +"data for specific protocols and origin servers. For more information about " +"the cache, see :ref:`configuring-the-cache`." +msgstr "" +"Traffic Server はすべてのキャッシュディスクのあらゆるディスク不良を許容する" +"ようにデザインされています。完全にディスクが壊れてしまった場合、Traffic " +"Server はそのディスクを破損したと印をつけ、残りのディスクを使い続けます。す" +"べてのディスクが壊れた場合、Traffic Server は proxy-only モードに切り替わり" +"ます。特定のプロトコルやオリジンサーバーのデータを保存するための一定のディス" +"クスペースを予約するためにキャッシュを分割することができます。キャッシュに関" +"するより詳しい情報は :ref:`configuring-the-cache` を参照してください。" + +#: ../../admin/index.en.rst:162 +msgid "The RAM Cache" +msgstr "The RAM Cache" + +#: ../../admin/index.en.rst:164 +msgid "" +"Traffic Server maintains a small RAM cache that contains extremely popular " +"objects. This RAM cache serves the most popular objects as fast as possible " +"and reduces load on disks, especially during temporary traffic peaks. You " +"can configure the RAM cache size to suit your needs; for detailed " +"information, refer to :ref:`changing-the-size-of-the-ram-cache`." 
+msgstr "" +"Traffic Server はとても頻繁にアクセスされるオブジェクトを含む小さな RAM " +"キャッシュを持っています。特に一時的なトラフィックのピークの間に、この RAM " +"キャッシュは最もポピュラーなオブジェクトを可能な限り速く提供し、ディスクから" +"のロードを減らします。この RAM キャッシュのサイズは必要な量に設定することが" +"できます。より詳しい情報は :ref:`changing-the-size-of-the-ram-cache` を参照" +"してください。" + +#: ../../admin/index.en.rst:171 +msgid "The Host Database" +msgstr "The Host Database" + +#: ../../admin/index.en.rst:173 +msgid "" +"The Traffic Server host database stores the domain name server (DNS) " +"entries of origin servers to which Traffic Server connects to fulfill user " +"requests. This information is used to adapt future protocol interactions " +"and optimize performance. Along with other information, the host database " +"tracks:" +msgstr "" +"Traffic Server は Traffic Server がユーザーリクエストを満たすために接続する" +"オリジンサーバーのドメインネームサーバー(DNS) のエントリを保存するデーター" +"ベースをホストします。この情報は将来のプロトコルインタラクションへの対応とパ" +"フォーマンスの最適化のために使われます。加えて、ホストデータベースは次の情報" +"を保存します。" + +#: ../../admin/index.en.rst:179 +msgid "DNS information (for fast conversion of hostnames to IP addresses)" +msgstr "DNS 情報(ホストネームから IP アドレスを高速に引くため)" + +#: ../../admin/index.en.rst:180 +msgid "" +"The HTTP version of each host (so advanced protocol features can be used " +"with hosts running modern servers)" +msgstr "" +"各ホストの HTTP バージョン(最新のプロトコルの機能はモダンなサーバーで使われ" +"ているかもしれないため)" + +#: ../../admin/index.en.rst:182 +msgid "" +"Host reliability and availability information (so users will not wait for " +"servers that are not running)" +msgstr "" +"信頼性と可用性の情報(ユーザーが起動していないサーバーを待つことがないように)" + +#: ../../admin/index.en.rst:186 +msgid "The DNS Resolver" +msgstr "The DNS Resolver" + +#: ../../admin/index.en.rst:188 +msgid "" +"Traffic Server includes a fast, asynchronous DNS resolver to streamline " +"conversion of hostnames to IP addresses. Traffic Server implements the DNS " +"resolver natively by directly issuing DNS command packets rather than " +"relying on slower, conventional resolver libraries. 
Since many DNS queries " +"can be issued in parallel and a fast DNS cache maintains popular bindings " +"in memory, DNS traffic is reduced." +msgstr "" +"Traffic Server はホスト名から IP アドレスへの変換を統合するために、高速で非" +"同期な DNS リゾルバも含んでいます。Traffic Server は遅くて月並みなリゾルバラ" +"イブラリに渡すよりも、直接 DNS コマンドパケットを渡すことによって、DNS リゾ" +"ルバをネイティブに実行します。多くの DNS クエリが並列で渡され、高速な DNS " +"キャッシュがポピュラーなバインディングをメモリに保存することにより、DNS トラ" +"フィックは減ります。" + +#: ../../admin/index.en.rst:196 +msgid "Traffic Server Processes" +msgstr "Traffic Server Processes" + +#: ../../admin/index.en.rst:198 +msgid "" +"Traffic Server contains three processes that work together to serve " +"requests and manage/control/monitor the health of the system. The three " +"processes are described below:" +msgstr "" +"Traffic Server は 3 つのプロセスから構成されます。それらはリクエストを返し、" +"システムの状態を管理/制御/監視するために協調して動きます。この 3 つのプロセ" +"スは下に説明されています。" + +#: ../../admin/index.en.rst:202 +msgid "" +"The :program:`traffic_server` process is the transaction processing engine " +"of Traffic Server. It is responsible for accepting connections, processing " +"protocol requests, and serving documents from the cache or origin server." +msgstr "" +":program:`traffic_server` プロセスは Traffic Server のトランザクションプロ" +"セッシングエンジンです。コネクションをアクセプトしたり、プロトコルリクエスト" +"を処理したり、キャッシュやオリジンサーバーからドキュメントを提供することに責" +"任を持ちます。" + +#: ../../admin/index.en.rst:207 +msgid "" +"The :program:`traffic_manager` process is the command and control facility " +"of the Traffic Server, responsible for launching, monitoring, and " +"reconfiguring the :program:`traffic_server` process. The :program:" +"`traffic_manager` process is also responsible for the proxy " +"autoconfiguration port, the statistics interface, cluster administration, " +"and virtual IP failover." 
+msgstr "" +":program:`traffic_manager` プロセスは Traffic Server への命令と管理機能で" +"す。起動や監視と :program:`traffic_server` プロセスを再設定したりすることに" +"責任を持ちます。 :program:`traffic_manager` プロセスはプロキシオートコンフィ" +"ギュレーションポートや統計のインターフェイスやクラスター管理とバーチャル IP " +"フェイルオーバーについても責任を持ちます。" + +#: ../../admin/index.en.rst:214 +msgid "" +"If the :program:`traffic_manager` process detects a :program:" +"`traffic_server` process failure, it instantly restarts the process but " +"also maintains a connection queue of all incoming requests. All incoming " +"connections that arrive in the several seconds before full server restart " +"are saved in the connection queue and processed in first-come, first-served " +"order. This connection queueing shields users from any server restart " +"downtime." +msgstr "" +":program:`traffic_manager` プロセスが :program:`traffic_server` プロセスが失" +"敗していることを検知した場合、即座にプロセスを再起動するだけでなく、すべての" +"リクエストのコネクションキューをメンテナンスします。サーバーが完全に再起動す" +"る数秒前に到着したすべてのインカミングコネクションはコネクションキューに格納" +"され、最初に来たものから順に処理されます。このコネクションキューはすべての" +"サーバーの再起動の際のダウンタイムからユーザーを守ります。" + +#: ../../admin/index.en.rst:222 +msgid "" +"The :program:`traffic_cop` process monitors the health of both the :program:" +"`traffic_server` and :program:`traffic_manager` processes. The :program:" +"`traffic_cop` process periodically (several times each minute) queries the :" +"program:`traffic_server` and :program:`traffic_manager` process by issuing " +"heartbeat requests to fetch synthetic web pages. In the event of failure " +"(if no response is received within a timeout interval or if an incorrect " +"response is received), :program:`traffic_cop` restarts the :program:" +"`traffic_manager` and :program:`traffic_server` processes." 
+msgstr "" +":program:`traffic_cop` プロセスは :program:`traffic_server` と :program:" +"`traffic_manager` プロセスの両方の状態をモニターします。 :program:" +"`traffic_cop` プロセスは定期的(毎分数回)に静的なウェブページを取得するハート" +"ビートリクエストを渡すことで :program:`traffic_server` と :program:" +"`traffic_manager` に問い合わせます。失敗したとき(一定期間の間にレスポンスが" +"帰って来ないときや不正なレスポンスを受け取ったとき) :program:`traffic_cop` " +"は :program:`traffic_manager` と :program:`traffic_server` プロセスを再起動" +"します。" + +#: ../../admin/index.en.rst:231 +msgid "The figure below illustrates the three Traffic Server processes." +msgstr "次の図は Traffic Server の3つのイラストです。" + +#: ../../admin/index.en.rst:237 +msgid "Illustration of the three Traffic Server Processes" +msgstr "Illustration of the three Traffic Server Processes" + +#: ../../admin/index.en.rst:240 +msgid "Administration Tools" +msgstr "Administration Tools" + +#: ../../admin/index.en.rst:242 +msgid "Traffic Server offers the following administration options:" +msgstr "Traffic Server は次の管理オプションを提供しています。" + +#: ../../admin/index.en.rst:244 +msgid "" +"The Traffic Line command-line interface is a text-based interface from " +"which you can monitor Traffic Server performance and network traffic, as " +"well as configure the Traffic Server system. From Traffic Line, you can " +"execute individual commands or script a series of commands in a shell." +msgstr "" +"Traffic Line コマンドラインインターフェイスはテキストベースのインターフェー" +"スです。Traffic Server のパフォーマンスとネットワークトラフィックを監視でき" +"ます。また同じように、Traffic Server システムを設定することもできます。" +"Traffic Line によって独立したコマンドや一連のコマンドのスクリプトをシェルで" +"実行することができます。" + +#: ../../admin/index.en.rst:249 +msgid "" +"The Traffic Shell command-line interface is an additional command-line tool " +"that enables you to execute individual commands that monitor and configure " +"the Traffic Server system." 
+msgstr "" +"Traffic Shell コマンドラインインターフェイスは追加のコマンドラインツールで、" +"Traffic Server システムを監視したり設定したりする独立したコマンドを実行する" +"ことができます。Traffic Line や Traffic Shell を通じたどんな変更も自動的に設" +"定ファイルを作ります。" + +#: ../../admin/index.en.rst:252 +msgid "" +"Various configuration files enable you to configure Traffic Server through " +"a simple file-editing and signal-handling interface. Any changes you make " +"through Traffic Line or Traffic Shell are automatically made to the " +"configuration files as well." +msgstr "" +"様々な設定ファイルはシンプルなファイル編集とシグナルハンドリングインター" +"フェースを通して、Traffic Server を設定することを可能とします。Traffic Line " +"か Traffic Shell を通じたどのような変更でも自動的に設定ファイルに書き込まれ" +"ます。" + +#: ../../admin/index.en.rst:256 +msgid "" +"Finally there is a clean C API which can be put to good use from a " +"multitude of languages. The Traffic Server Admin Client demonstrates this " +"for Perl." +msgstr "" +"最後に、多くの言語から使うことのできるクリーンな C API があります。 Traffic " +"Server Admin Client は Perl でこのことを示しています。" + +#: ../../admin/index.en.rst:261 +msgid "Traffic Analysis Options" +msgstr "Traffic Analysis Options" + +#: ../../admin/index.en.rst:263 +msgid "" +"Traffic Server provides several options for network traffic analysis and " +"monitoring:" +msgstr "" +"Traffic Server はネットワークトラフィックの分析と監視のためのいくつかのオプ" +"ションを提供しています。" + +#: ../../admin/index.en.rst:266 +msgid "" +"Traffic Line and Traffic Shell enable you to collect and process statistics " +"obtained from network traffic information." +msgstr "" +"Traffic Line と Traffic Shell はネットワークトラフィック情報から入手した統計" +"情報を集めて処理することを可能にします。" + +#: ../../admin/index.en.rst:269 +msgid "" +"Transaction logging enables you to record information (in a log file) about " +"every request Traffic Server receives and every error it detects. By " +"analyzing the log files, you can determine how many clients used the " +"Traffic Server cache, how much information each of them requested, and what " +"pages were most popular. 
You can also see why a particular transaction was "
+"in error and what state the Traffic Server was in at a particular time; for "
+"example, you can see that Traffic Server was restarted or that cluster "
+"communication timed out."
+msgstr ""
+"トランザクションロギングは Traffic Server が受け取ったすべてのリクエストとす"
+"べての検知したエラーの情報を (ログファイルの中に) 記録することを可能にしま"
+"す。ログファイルを分析することによって、どれほどのクライアントが Traffic "
+"Server キャッシュを使用し、どれくらいの情報がリクエストされ、どのページがポ"
+"ピュラーなのかを確認することができます。特定のトランザクションがなぜエラーに"
+"なり、そのときの Traffic Server の状態がどうだったのかみることもできます。例"
+"えば Traffic Server が再起動したときや、クラスターコミュニケーションがタイム"
+"アウトしたときなどです。"
+
+#: ../../admin/index.en.rst:278
+msgid ""
+"Traffic Server supports several standard log file formats, such as Squid "
+"and Netscape, and its own custom format. You can analyze the standard "
+"format log files with off-the-shelf analysis packages. To help with log "
+"file analysis, you can separate log files so that they contain information "
+"specific to protocol or hosts."
+msgstr ""
+"Traffic Server は Squid や Netscape などのいくつかの標準的なログフォーマット"
+"や固有のフォーマットをサポートしています。off-the-shelf 分析パッケージによっ"
+"て標準的なフォーマットのログを分析することができます。ログファイルの分析を助"
+"けるために、特定のプロトコルやホストの情報を含むようにログファイルを分割する"
+"ことができます。"
+
+#: ../../admin/index.en.rst:284
+msgid ""
+"Traffic analysis options are described in more detail in :ref:`monitoring-"
+"traffic`."
+msgstr ""
+"トラフィック分析オプションは :ref:`monitoring-traffic` により詳しく書かれて"
+"います。"
+
+#: ../../admin/index.en.rst:286
+msgid ""
+"Traffic Server logging options are described in :ref:`working-with-log-"
+"files`."
+msgstr ""
+"Traffic Server ロギングオプションは :ref:`working-with-log-files` に書かれて"
+"います。"
+
+#: ../../admin/index.en.rst:289
+msgid "Traffic Server Security Options"
+msgstr "Traffic Server Security Options"
+
+#: ../../admin/index.en.rst:291
+msgid ""
+"Traffic Server provides numerous options that enable you to establish "
+"secure communication between the Traffic Server system and other computers "
+"on the network. 
Using the security options, you can do the following:" +msgstr "" +"Traffic Server は Traffic Server システムと他のコンピュータネットワーク間の" +"セキュアな通信を確立することを可能にする多数のオプションを提供しています。セ" +"キュリティオプションを使うことによって、次のことが可能です。" + +#: ../../admin/index.en.rst:296 +msgid "Control client access to the Traffic Server proxy cache." +msgstr "Traffic Server プロキシーキャッシュにアクセスするクライアントの管理" + +#: ../../admin/index.en.rst:297 +msgid "" +"Configure Traffic Server to use multiple DNS servers to match your site's " +"security configuration. For example, Traffic Server can use different DNS " +"servers, depending on whether it needs to resolve hostnames located inside " +"or outside a firewall. This enables you to keep your internal network " +"configuration secure while continuing to provide transparent access to " +"external sites on the Internet." +msgstr "" +"あなたのサイトのセキュリティ設定に合わせて複数の DNS サーバーを使うような " +"Traffic Server の設定。例えば Traffic Server はホストネームを解決する必要が" +"あるのがファイアーウォールの内側か外側かによって異なる DNS サーバーを使うこ" +"とができます。これは透過的にインターネット上の外部サイトにアクセスすることを" +"提供しつつ、インターナルネットワーク設定をセキュアに保つことを可能にします。" + +#: ../../admin/index.en.rst:303 +msgid "" +"Configure Traffic Server to verify that clients are authenticated before " +"they can access content from the Traffic Server cache." +msgstr "" +"クライアントが Traffic Server キャッシュからコンテンツにアクセスできるように" +"なる前に、クライアントが認証されていることを検証する Traffic Server 設定" + +#: ../../admin/index.en.rst:305 +msgid "" +"Secure connections in reverse proxy mode between a client and Traffic " +"Server, and Traffic Server and the origin server, using the SSL termination " +"option." +msgstr "" +"SSL ターミネーションオプションを使うことによる、リバースプロキシーモードでの" +"クライアントと Traffic Server 間と Traffic Server とオリジンサーバー間の安全" +"な接続" + +#: ../../admin/index.en.rst:308 +msgid "Control access via SSL (Secure Sockets Layer)." +msgstr "SSL (Secure Socket Layer) によるアクセスの管理" + +#: ../../admin/index.en.rst:310 +msgid "" +"Traffic Server security options are described in more detail in :ref:" +"`security-options`." 
+msgstr "" +"Traffic Server セキュリティオプションは :ref:`security-options` に詳しく述べ" +"られています。" + +#: ../../admin/index.en.rst:314 +msgid "Tuning Traffic Server" +msgstr "Traffic Server のチューニング" + +#: ../../admin/index.en.rst:316 +msgid "" +"Finally this last chapter on :ref:`performance-tuning` discusses the vast " +"number of options that allow to optimally tune Apache Traffic Server for " +"maximum performance." +msgstr "" +"最後にこの最終章 :ref:`performance-tuning` で Apache Traffic Server が最大限" +"のパフォーマンスを発揮するように最適なチューニングをするための膨大な数のオプ" +"ションについて論じます。" diff --git a/doc/locale/ja/LC_MESSAGES/admin/monitoring-traffic.en.po b/doc/locale/ja/LC_MESSAGES/admin/monitoring-traffic.en.po new file mode 100644 index 00000000..c0276269 --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/admin/monitoring-traffic.en.po @@ -0,0 +1,157 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-11-08 17:57+0900\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../admin/monitoring-traffic.en.rst:5 +msgid "Monitoring Traffic" +msgstr "" + +#: ../../admin/monitoring-traffic.en.rst:24 +msgid "" +"Traffic Server provides several options for monitoring system performance " +"and analyzing network traffic." +msgstr "" + +#: ../../admin/monitoring-traffic.en.rst:31 +msgid "Traffic Server Monitoring Tools" +msgstr "" + +#: ../../admin/monitoring-traffic.en.rst:33 +msgid "" +"Traffic Server provides the following tools to monitor system performance " +"and analyze network traffic:" +msgstr "" + +#: ../../admin/monitoring-traffic.en.rst:36 +msgid "" +"Traffic Server can send email that's triggered by alarms that signal any " +"detected failure conditions; refer to `Working with Traffic Manager " +"Alarms`_." +msgstr "" + +#: ../../admin/monitoring-traffic.en.rst:38 +msgid "" +"The Traffic Line command-line interface provides an alternative method of " +"viewing Traffic Server performance and network traffic information; refer " +"to `Viewing Statistics from Traffic Line`_." +msgstr "" + +#: ../../admin/monitoring-traffic.en.rst:41 +msgid "" +"The Traffic Shell command-line tool provides yet another alternative method " +"of viewing Traffic Server performance and network traffic information; " +"refer to `Starting Traffic Shell <../getting-started#StartTrafficShell>`_." +msgstr "" + +#: ../../admin/monitoring-traffic.en.rst:48 +msgid "Working with Traffic Manager Alarms" +msgstr "" + +#: ../../admin/monitoring-traffic.en.rst:50 +msgid "" +"Traffic Server signals an alarm when it detects a problem. 
For example, the " +"space allocated to event logs could be full or Traffic Server may not be " +"able to write to a configuration file." +msgstr "" + +#: ../../admin/monitoring-traffic.en.rst:55 +msgid "Configuring Traffic Server to Email Alarms" +msgstr "" + +#: ../../admin/monitoring-traffic.en.rst:57 +msgid "" +"To configure Traffic Server to send an email to a specific address whenever " +"an alarm occurs, follow the steps below:" +msgstr "" + +#: ../../admin/monitoring-traffic.en.rst:60 +msgid "In the :file:`records.config` file" +msgstr "" + +#: ../../admin/monitoring-traffic.en.rst:61 +msgid "" +"Set the :ts:cv:`proxy.config.alarm_email` variable to the email address " +"alarms will be routed to." +msgstr "" + +#: ../../admin/monitoring-traffic.en.rst:62 +msgid "" +"Run the command :option:`traffic_line -x` to apply the configuration " +"changes." +msgstr "" + +#: ../../admin/monitoring-traffic.en.rst:66 +msgid "Using a Script File for Alarms" +msgstr "" + +#: ../../admin/monitoring-traffic.en.rst:68 +msgid "" +"Alarm messages are built into Traffic Server - you cannot change them. " +"However, you can write a script file to execute certain actions when an " +"alarm is signaled. Traffic Server provides a sample script file named " +"``example_alarm_bin.sh`` in the ``bin`` directory; simply modify the file " +"to suit your needs." +msgstr "" + +#: ../../admin/monitoring-traffic.en.rst:75 +msgid "Viewing Statistics from Traffic Line" +msgstr "" + +#: ../../admin/monitoring-traffic.en.rst:77 +msgid "" +"You can use the Traffic Line command-line interface to view statistics " +"about Traffic Server performance and web traffic. In addition to viewing " +"statistics, you can also configure, stop, and restart the Traffic Server " +"system. For additional information, refer to :ref:`configure-using-traffic-" +"line` and :ref:`traffic-line-commands`. 
You can view specific information " +"about a Traffic Server node or cluster by specifying the variable that " +"corresponds to the statistic you want to see." +msgstr "" + +#: ../../admin/monitoring-traffic.en.rst:86 +msgid "**To view a statistic**, enter the following command:::" +msgstr "" + +#: ../../admin/monitoring-traffic.en.rst:90 +msgid "" +"where ``variable`` is the variable representing the information you want to " +"view. For a list of variables you can specify, refer to :ref:`Traffic Line " +"Variables `." +msgstr "" + +#: ../../admin/monitoring-traffic.en.rst:94 +msgid "" +"For example, the following command displays the document hit rate for the " +"Traffic Server node:::" +msgstr "" + +#: ../../admin/monitoring-traffic.en.rst:99 +msgid "" +"If the Traffic Server ``bin`` directory is not in your path, then prepend " +"the Traffic Line command with ``./`` (for example: :option:`traffic_line -" +"r` ``variable``)." +msgstr "" diff --git a/doc/locale/ja/LC_MESSAGES/admin/performance-tuning.en.po b/doc/locale/ja/LC_MESSAGES/admin/performance-tuning.en.po new file mode 100644 index 00000000..8a93b55a --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/admin/performance-tuning.en.po @@ -0,0 +1,72 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.2\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2014-01-15 22:31+0900\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../admin/performance-tuning.en.rst:5 +msgid "Performance Tuning" +msgstr "" + +#: ../../admin/performance-tuning.en.rst:29 +msgid "Before you start" +msgstr "" + +#: ../../admin/performance-tuning.en.rst:31 +msgid "" +"There is no single option to that will guarantee maximum performance of " +"Apache Traffic Server in every use-case. There are however numerous options " +"that help tune its performance under different loads and in its - often " +"vastly different - use-cases." +msgstr "" + +#: ../../admin/performance-tuning.en.rst:37 +msgid "Building Traffic Server" +msgstr "" + +#: ../../admin/performance-tuning.en.rst:39 +msgid "" +"A lot of speed can be gained or lost depending on the way ATS is built." 
+msgstr "" + +#: ../../admin/performance-tuning.en.rst:42 +msgid "Tuning the Machine" +msgstr "" + +#: ../../admin/performance-tuning.en.rst:45 +msgid "Operating Systems Options" +msgstr "" + +#: ../../admin/performance-tuning.en.rst:48 +msgid "Optimal Use of Memory" +msgstr "" + +#: ../../admin/performance-tuning.en.rst:51 +msgid "Tuning different Thread types" +msgstr "" + +#: ../../admin/performance-tuning.en.rst:54 +msgid "Tuning Plugin Execution" +msgstr "" diff --git a/doc/locale/ja/LC_MESSAGES/admin/reverse-proxy-http-redirects.en.po b/doc/locale/ja/LC_MESSAGES/admin/reverse-proxy-http-redirects.en.po new file mode 100644 index 00000000..03b141a1 --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/admin/reverse-proxy-http-redirects.en.po @@ -0,0 +1,626 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-11-08 17:57+0900\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../admin/reverse-proxy-http-redirects.en.rst:5 +#: ../../admin/reverse-proxy-http-redirects.en.rst:26 +msgid "Reverse Proxy and HTTP Redirects" +msgstr "リバースプロキシーと HTTP リダイレクト" + +#: ../../admin/reverse-proxy-http-redirects.en.rst:28 +msgid "" +"As a reverse proxy cache, Traffic Server serves requests on behalf of " +"origin servers. Traffic Server is configured in such a way that it appears " +"to clients like a normal origin server." +msgstr "" +"リバースプロキシーキャッシュとして、Traffic Server はオリジンサーバーの代わ" +"りにリクエストに応えます。Traffic Server はクライアントには通常のオリジン" +"サーバーに見えるように設定されます。" + +#: ../../admin/reverse-proxy-http-redirects.en.rst:37 +msgid "Understanding Reverse Proxy Caching" +msgstr "リバースプロキシーキャッシュを理解する" + +#: ../../admin/reverse-proxy-http-redirects.en.rst:39 +msgid "" +"With **forward proxy caching**, Traffic Server handles web requests to " +"distant origin servers on behalf of the clients requesting the content. " +"**Reverse proxy caching** (also known as **server acceleration** or " +"**virtual web hosting**) is different because Traffic Server acts as a " +"proxy cache on behalf of the origin servers that store the content. Traffic " +"Server is configured to be *the* origin server which the client is trying " +"to connect to. In a typical scenario the advertised hostname of the origin " +"server resolves to Traffic Server, which acts as the real origin server." 
+msgstr "" +"\\ **フォワードプロキシーキャッシュ**\\ の場合、Traffic Server はコンテンツ" +"をリクエストしたクライアントに代わって遠くはなれたオリジンサーバーへのリクエ" +"ストを取り扱います。\\ **リバースプロキシーキャッシュ**\\ (\\ **サーバーアク" +"セラレーション**\\ や\\ **バーチャルウェブホスティング**\\ としても知られて" +"います)では Traffic Server がコンテンツを持っているオリジンサーバーに代わっ" +"てプロキシーキャッシュとして振る舞う点が異なります。Traffic Server はクライ" +"アントが接続しようとしている\\ *その*\\ オリジンサーバーとなるように設定され" +"ます。典型的なシナリオとしては、オリジンサーバーの広告されたホスト名が本当の" +"オリジンサーバーのように振る舞う Traffic Server へ解決されます。" + +#: ../../admin/reverse-proxy-http-redirects.en.rst:50 +msgid "Reverse Proxy Solutions" +msgstr "リバースプロキシーによる対応策" + +#: ../../admin/reverse-proxy-http-redirects.en.rst:52 +msgid "" +"There are many ways to use Traffic Server as a reverse proxy. Below are a " +"few example scenarios." +msgstr "" +"Traffic Server をリバースプロキシーとして使う方法はたくさんあります。以下は" +"いくつかの例です。" + +#: ../../admin/reverse-proxy-http-redirects.en.rst:55 +msgid "You can use Traffic Server in reverse proxy mode to:" +msgstr "Traffic Server をリバースプロキシーモードとして使うことでできること" + +#: ../../admin/reverse-proxy-http-redirects.en.rst:57 +msgid "Offload heavily-used origin servers" +msgstr "使用頻度の高いオリジンサーバーの負荷を軽減する" + +#: ../../admin/reverse-proxy-http-redirects.en.rst:58 +msgid "Deliver content efficiently in geographically distant areas" +msgstr "地理的に離れた地域に効率的にコンテントを配信する" + +#: ../../admin/reverse-proxy-http-redirects.en.rst:59 +msgid "Provide security for origin servers that contain sensitive information" +msgstr "センシティブな情報を含むオリジンサーバーにセキュリティを提供する" + +#: ../../admin/reverse-proxy-http-redirects.en.rst:63 +msgid "Offloading Heavily-Used Origin Servers" +msgstr "使用頻度の高いオリジンサーバーの負荷の軽減" + +#: ../../admin/reverse-proxy-http-redirects.en.rst:65 +msgid "" +"Traffic Server can absorb requests to the main origin server and improve " +"the speed & quality of web serving by reducing load and hot spots on backup " +"origin servers. 
For example, a web hoster can maintain a scalable Traffic " +"Server serving engine with a set of low-cost, low-performance, less-" +"reliable PC origin servers as backup servers. In fact, a single Traffic " +"Server can act as the virtual origin server for multiple backup origin " +"servers, as shown in the figure below." +msgstr "" +"Traffic Server は主となるオリジンサーバーへのリクエストを吸収できるほか、" +"バックアップオリジンサーバーの負荷とホットスポットを低減することでWeb の提供" +"スピードと品質を向上させます。例えば、Web をホストする人は低コストで低パ" +"フォーマンスで信頼性の低い PC によるオリジンサーバーをバックアップサーバーと" +"したエンジンを提供するスケーラブルな Traffic Server を維持することができま" +"す。実際、下記の図に示すように、1台の Traffic Server は複数のバックアップオ" +"リジンサーバーのために仮想的なオリジンサーバーとして振る舞うことが可能です。" + +#: ../../admin/reverse-proxy-http-redirects.en.rst:77 +msgid "Traffic Server as reverse proxy for a pair of origin servers" +msgstr "1組のオリジンサーバーのリバースプロキシーとして動く Traffic Server" + +#: ../../admin/reverse-proxy-http-redirects.en.rst:80 +msgid "Delivering Content in Geographically-Dispersed Areas" +msgstr "分散した地域でのコンテンツの配信" + +#: ../../admin/reverse-proxy-http-redirects.en.rst:82 +msgid "" +"Traffic Server can be used in reverse proxy mode to accelerate origin " +"servers that provide content to areas not located within close geographical " +"proximity. Caches are typically easier to manage and are more cost-" +"effective than replicating data. For example, Traffic Server can be used as " +"a mirror site on the far side of a trans-Atlantic link to serve users " +"without having to fetch the request and content across expensive " +"international connections. Unlike replication, for which hardware must be " +"configured to replicate all data and to handle peak capacity, Traffic " +"Server dynamically adjusts to optimally use the serving and storing " +"capacity of the hardware. Traffic Server is also designed to keep content " +"fresh automatically, thereby eliminating the complexity of updating remote " +"origin servers." 
+msgstr "" +"Traffic Server は地理的に近接していないエリアにコンテンツを提供するオリジン" +"サーバーを加速するためにリバースプロキシーモードで使用できます。キャッシュは" +"レプリケーションよりもコストパフォーマンスが高く、一般的により簡単です。例え" +"ば、高価な国をまたぐコネクションを使ってリクエストやコンテンツを取得すること" +"なく大西洋の向こう側のミラーサイトとして Traffic Server を利用することができ" +"ます。全データを複製しピークキャパシティも扱うレプリケーションとは異なり、" +"Traffic Server はハードウェアのキャパシティを最適に利用するよう動的に順応し" +"ます。Traffic Server は自動的にコンテンツを新鮮に保つようにも設計されている" +"ので、リモートオリジンサーバーをアップデートする複雑さも除去できます。" + +#: ../../admin/reverse-proxy-http-redirects.en.rst:96 +msgid "Providing Security for an Origin Server" +msgstr "オリジンサーバーへのセキュリティの提供" + +#: ../../admin/reverse-proxy-http-redirects.en.rst:98 +msgid "" +"Traffic Server can be used in reverse proxy mode to provide security for an " +"origin server. If an origin server contains sensitive information that you " +"want to keep secure inside your firewall, then you can use a Traffic Server " +"outside the firewall as a reverse proxy for that origin server. When " +"outside clients try to access the origin server, the requests instead go to " +"Traffic Server. If the desired content is *not* sensitive, then it can be " +"served from the cache. If the content is sensitive and not cacheable, then " +"Traffic Server obtains the content from the origin server (the firewall " +"allows only Traffic Server access to the origin server). The sensitive " +"content resides on the origin server, safely inside the firewall." +msgstr "" +"Traffic Server はオリジンサーバーにセキュリティを提供するためにリバースプロ" +"キシーモードで使用できます。もしファイアーウォールの内側にあるオリジンサー" +"バーが安全にしておきたいセンシティブな情報を持っている場合、Traffic Server " +"をファイアーウォールの外側におき、そのオリジンサーバーのためのリバースプロキ" +"シーとして使用できます。外部のクライアントがそのオリジンサーバーにアクセスを" +"試みるとリクエストは Traffic Server に向かいます。もし求められたコンテンツが" +"センシティブでは\\ *ない*\\ 場合、それはキャッシュから提供されます。もしコン" +"テンツがセンシティブでありキャッシュ不可能な場合、Traffic Server はオリジン" +"サーバーからコンテンツを取得します(ファイアーウォールはオリジンサーバーへの" +"アクセスを Traffic Server にのみ許します)。センシティブなコンテンツは安全な" +"ファイアーウォールの内側のオリジンサーバー上にあります。" + +#: ../../admin/reverse-proxy-http-redirects.en.rst:111 +msgid "How Does Reverse Proxy Work?" 
+msgstr "リバースプロキシーの動作" + +#: ../../admin/reverse-proxy-http-redirects.en.rst:113 +msgid "" +"When a browser makes a request, it normally sends that request directly to " +"the origin server. When Traffic Server is in reverse proxy mode, it " +"intercepts the request before it reaches the origin server. Typically, this " +"is done by setting up the DNS entry for the origin server (i.e., the origin " +"server's 'advertised' hostname) so it resolves to the Traffic Server IP " +"address. When Traffic Server is configured as the origin server, the " +"browser connects to Traffic Server rather than the origin server. For " +"additional information, see `HTTP Reverse Proxy`_." +msgstr "" +"ブラウザーがリクエストを行うとき、通常はリクエストを直接オリジンサーバーに送" +"信します。Traffic Server がリバースプロキシーモードになっているときはリクエ" +"ストがオリジンサーバーに届く前に Traffic Server が横取りします。通常これはオ" +"リジンサーバーの DNS エントリー(オリジンサーバーの'広告された'ホスト名)を " +"Traffic Server の IP アドレスに解決されるように設定すれば完了です。Traffic " +"Server がオリジンサーバーとして設定されている場合、ブラウザーはオリジンサー" +"バーではなく Traffic Server に接続します。より詳しくは `HTTP リバースプロキ" +"シー`_\\ を見てください。" + +#: ../../admin/reverse-proxy-http-redirects.en.rst:122 +msgid "" +"To avoid a DNS conflict, the origin server’s hostname and its advertised " +"hostname must not be the same." +msgstr "" +"DNS の衝突を避けるため、オリジンサーバーのホスト名とその広告されたホスト名は" +"同じであってはなりません。" + +#: ../../admin/reverse-proxy-http-redirects.en.rst:125 +msgid "HTTP Reverse Proxy" +msgstr "HTTP リバースプロキシー" + +#: ../../admin/reverse-proxy-http-redirects.en.rst:127 +msgid "" +"In reverse proxy mode, Traffic Server serves HTTP requests on behalf of a " +"web server. The figure below illustrates how Traffic Server in reverse " +"proxy mode serves an HTTP request from a client browser." 
+msgstr "" +"リバースプロキシーモードでは、Traffic Server は HTTP リクエストをウェブサー" +"バーの代わりに受け取ります。下の図は リバースプロキシーモードの Traffic " +"Server がどのようにクライアントからの HTTP リクエストを受け取るのかを説明し" +"ています。" + +#: ../../admin/reverse-proxy-http-redirects.en.rst:135 +msgid "HTTP reverse proxy" +msgstr "HTTP リバースプロキシー" + +#: ../../admin/reverse-proxy-http-redirects.en.rst:137 +msgid "The figure above demonstrates the following steps:" +msgstr "上の図は次のステップを説明しています。" + +#: ../../admin/reverse-proxy-http-redirects.en.rst:139 +msgid "" +"A client browser sends an HTTP request addressed to a host called ``www." +"host.com`` on port 80. Traffic Server receives the request because it is " +"acting as the origin server (the origin server’s advertised hostname " +"resolves to Traffic Server)." +msgstr "" +"クライアントブラウザが ``www.host.com`` の 80 番ポートに HTTP リクエストを送" +"信します。Traffic Server はオリジンサーバーとして振る舞っているのでこのリク" +"エストを受け取ります(オリジンサーバーの広告されたホスト名は Traffic Server " +"へ解決されるように広告されています)。" + +#: ../../admin/reverse-proxy-http-redirects.en.rst:143 +msgid "" +"Traffic Server locates a map rule in the ``remap.config`` file and remaps " +"the request to the specified origin server (``realhost.com``)." +msgstr "" +"Traffic Server は ``remap.config`` ファイル内にあるマップルールを見つけ、リ" +"クエストを指定されたオリジンサーバー(``realhost.com``)にリマップします。" + +#: ../../admin/reverse-proxy-http-redirects.en.rst:145 +msgid "" +"Traffic Server opens an HTTP connection to the origin server. (If the " +"request is not able to be served from cache)" +msgstr "" +"Traffic Server は オリジンサーバーへの HTTP コネクションを開きます。(もしリ" +"クエストにキャッシュから応えられなかった場合)" + +#: ../../admin/reverse-proxy-http-redirects.en.rst:146 +msgid "" +"If the request is a cache hit and the content is fresh, then Traffic Server " +"sends the requested object to the client from the cache. Otherwise, Traffic " +"Server obtains the requested object from the origin server, sends the " +"object to the client, and saves a copy in its cache." 
+msgstr "" +"もしリクエストがキャッシュにヒットしコンテンツが新鮮であれば、Traffic " +"Server はリクエストされたオブジェクトをキャッシュからクライアントに送信しま" +"す。そうではなく、Traffic Server がオリジンサーバーからリクエストされたオブ" +"ジェクトを取得した場合は、そのオブジェクトをクライアントに送信し、コピーを" +"キャッシュに保存します。" + +#: ../../admin/reverse-proxy-http-redirects.en.rst:152 +msgid "To configure HTTP reverse proxy, you must perform the following tasks:" +msgstr "" +"HTTP リバースプロキシーを設定するためには、次のタスクを行う必要があります" + +#: ../../admin/reverse-proxy-http-redirects.en.rst:154 +msgid "" +"Create mapping rules in the :file:`remap.config` file (refer to `Creating " +"Mapping Rules for HTTP Requests`_). ::" +msgstr "" +":file:`remap.config` ファイルにマッピングルールを作成する (`HTTP リクエスト" +"用マッピングルールの作成`_\\ を参照してください) 。" + +#: ../../admin/reverse-proxy-http-redirects.en.rst:160 +msgid "" +"Enable the reverse proxy option (refer to `Enabling HTTP Reverse Proxy`_)." +msgstr "" +"リバースプロキシーオプションを有効にする(`HTTP リバースプロキシーの有効化`_" +"\\ を参照してください)。" + +#: ../../admin/reverse-proxy-http-redirects.en.rst:162 +msgid "" +"In addition to the tasks above, you can also `Setting Optional HTTP Reverse " +"Proxy Options`_." +msgstr "" +"上のタスクに加え、\\ `省略可能な HTTP リバースプロキシーオプションの設定`_" +"\\ を行うこともできます。" + +#: ../../admin/reverse-proxy-http-redirects.en.rst:165 +msgid "Handling Origin Server Redirect Responses" +msgstr "オリジンサーバーのリダイレクトレスポンスを扱う" + +#: ../../admin/reverse-proxy-http-redirects.en.rst:167 +msgid "" +"Origin servers often send redirect responses back to browsers redirecting " +"them to different pages. For example, if an origin server is overloaded, " +"then it might redirect browsers to a less loaded server. Origin servers " +"also redirect when web pages that have moved to different locations. When " +"Traffic Server is configured as a reverse proxy, it must readdress " +"redirects from origin servers so that browsers are redirected to Traffic " +"Server and *not* to another origin server." 
+msgstr "" +"オリジンサーバーはしばしばブラウザーを他のページにリダイレクトするためにリダ" +"イレクトレスポンスを返します。例えば、オリジンサーバーが過負荷になった場合に" +"は負荷の少ないサーバーへブラウザーをリダイレクトするかもしれません。オリジン" +"サーバーはウェブページが異なる場所に移動された場合にもリダイレクトを行いま" +"す。Traffic Server がリバースプロキシーとして設定されている場合、ブラウザー" +"が他のオリジンサーバー\\ *ではなく* Traffic Server にリダイレクトされるよう" +"に、リダイレクト先をオリジンサーバーから書き換えなければなりません。" + +#: ../../admin/reverse-proxy-http-redirects.en.rst:175 +msgid "" +"To readdress redirects, Traffic Server uses reverse-map rules. Unless you " +"have :ts:cv:`proxy.config.url_remap.pristine_host_hdr` enabled (the " +"default) you should generally set up a reverse-map rule for each map rule. " +"To create reverse-map rules, refer to `Using Mapping Rules for HTTP " +"Requests`_." +msgstr "" +"リダイレクト先を書き直すために、Traffic Server はリバースマップルールを使用" +"します。 :ts:cv:`proxy.config.url_remap.pristine_host_hdr` を有効にしていな" +"い限り(それがデフォルトです)、一般的には各マップルールに対してリバースマップ" +"ルールを用意すべきです。リバースマップルールを作成するには `HTTP リクエスト" +"用マッピングルールの使用`_\\ を参照してください。" + +#: ../../admin/reverse-proxy-http-redirects.en.rst:182 +msgid "Using Mapping Rules for HTTP Requests" +msgstr "HTTP リクエスト用マッピングルールの使用" + +#: ../../admin/reverse-proxy-http-redirects.en.rst:184 +msgid "Traffic Server uses two types of mapping rules for HTTP reverse proxy." +msgstr "" +"Traffic Server は HTTP リバースプロキシー用に2タイプのマッピングルールを使用" +"します。" + +#: ../../admin/reverse-proxy-http-redirects.en.rst:187 +msgid "map rule" +msgstr "マップルール" + +#: ../../admin/reverse-proxy-http-redirects.en.rst:189 +msgid "" +"A **map rule** translates the URL in client requests into the URL where the " +"content is located. When Traffic Server is in reverse proxy mode and " +"receives an HTTP client request, it first constructs a complete request URL " +"from the relative URL and its headers. Traffic Server then looks for a " +"match by comparing the complete request URL with its list of target URLs in " +"the :file:`remap.config` file. 
For the request URL to match a target URL, " +"the following conditions must be true:" +msgstr "" +"\\ **マップルール**\\ はクライアントのリクエストに含まれる URL をコンテンツ" +"が存在する場所に変換します。Traffic Server がリバースプロキシーモードで " +"HTTP クライアントリクエストを受け取ると、相対 URL とヘッダーから完全な URL " +"を組み立てます。そしてその完全な URL と :file:`remap.config` ファイル内の" +"ターゲット URL とを比較し、マッチするものを探します。ターゲット URL にマッチ" +"するリクエスト URL は次の条件を満たさなければなりません" + +#: ../../admin/reverse-proxy-http-redirects.en.rst:198 +msgid "The scheme of both URLs must be the same" +msgstr "URL のスキームが同じであること" + +#: ../../admin/reverse-proxy-http-redirects.en.rst:199 +msgid "" +"The host in both URLs must be the same. If the request URL contains an " +"unqualified hostname, then it will never match a target URL with a fully-" +"qualified hostname." +msgstr "" +"URL のホストが同じであること。もしリクエスト URL が修飾されていないホスト名" +"を含んでいる場合、完全修飾されたホスト名を含むターゲット URL にはマッチしま" +"せん。" + +#: ../../admin/reverse-proxy-http-redirects.en.rst:202 +msgid "" +"The ports in both URLs must be the same. If no port is specified in a URL, " +"then the default port for the scheme of the URL is used." +msgstr "" +"ポートが同じであること。もし URL にポートが指定されていない場合、その URL ス" +"キームのデフォルトのポートが使用されます。" + +#: ../../admin/reverse-proxy-http-redirects.en.rst:204 +msgid "" +"The path portion of the target URL must match a prefix of the request URL " +"path" +msgstr "" +"ターゲット URL のパス部分がリクエスト URL のパスの先頭と一致すること。" + +#: ../../admin/reverse-proxy-http-redirects.en.rst:207 +msgid "" +"If Traffic Server finds a match, then it translates the request URL into " +"the replacement URL listed in the map rule: it sets the host and path of " +"the request URL to match the replacement URL. If the URL contains path " +"prefixes, then Traffic Server removes the prefix of the path that matches " +"the target URL path and substitutes it with the path from the replacement " +"URL. If two mappings match a request URL, then Traffic Server applies the " +"first mapping listed in the :file:`remap.config` file." 
+msgstr ""
+"Traffic Server がマッチするものを見つけた場合、リクエスト URL をマップルール"
+"の置換 URL に変換します。リクエスト URL のホストとパスを置換 URL に一致する"
+"ようにセットします。もし URL がパスのプレフィックスを持っている場合、"
+"Traffic Server はターゲット URL のパスからプレフィックスを取り除き、置換 "
+"URL のパス部分と置き換えます。もしリクエスト URL にマッチするものが2つあった"
+"場合、Traffic Server は :file:`remap.config` ファイル内で先にマッチするほう"
+"を適用します。"
+
+#: ../../admin/reverse-proxy-http-redirects.en.rst:216
+msgid "reverse-map rule"
+msgstr "リバースマップルール"
+
+#: ../../admin/reverse-proxy-http-redirects.en.rst:218
+msgid ""
+"A **reverse-map rule** translates the URL in origin server redirect "
+"responses to point to Traffic Server so that clients are **redirected** to "
+"Traffic Server instead of accessing an origin server directly. For example, "
+"if there is a directory ``/pub`` on an origin server at ``www.molasses."
+"com`` and a client sends a request to that origin server for ``/pub``, then "
+"the origin server might reply with a redirect by sending the Header "
+"``Location: http://www.test.com/pub/`` to let the client know that it was a "
+"directory it had requested, not a document (a common use of redirects is to "
+"normalize URLs so that clients can bookmark documents properly)."
+msgstr ""
+"\\ **リバースマップルール**\\ はクライアントがオリジンサーバーに直接アクセス"
+"する代わりに Traffic Server に\\ **リダイレクト**\\ されるようにするために、"
+"オリジンサーバーのリダイレクトレスポンス内の URL を Traffic Server に向かう"
+"ように変換します。例えば、``www.molasses.com`` というオリジンサーバーに ``/"
+"pub`` というディレクトリがあり、クライアントがそのオリジンサーバーに ``/"
+"pub`` のリクエストを送信すると、オリジンサーバーはリクエストされたものがド"
+"キュメントではなくディレクトリであることを知らせるために ``Location: http://"
+"www.test.com/pub/`` ヘッダーによるリダイレクトで応答するかもしれません。(リ"
+"ダイレクトの一般的な使われ方はクライアントがドキュメントを正しくブックマーク"
+"できるようにする URL の正規化です。) 。"
+
+#: ../../admin/reverse-proxy-http-redirects.en.rst:229
+msgid ""
+"Traffic Server uses ``reverse_map`` rules to prevent clients (that receive "
+"redirects from origin servers) from bypassing Traffic Server and directly "
+"accessing the origin servers. 
In many cases the client would be hitting a " +"wall because ``realhost.com`` actually does not resolve for the client. (E." +"g.: Because it's running on a port shielded by a firewall, or because it's " +"running on a non-routable LAN IP)" +msgstr "" +"Traffic Server は (オリジンサーバーからリダイレクト指示を受けた) クライアン" +"トが Traffic Server をバイパスしてオリジンサーバーに直接アクセスすることを防" +"ぐために ``reverse_map`` ルールを使用します。クライアントが壁にぶつかる多く" +"のケースは ``realhost.com`` が実際にはクライアントには解決できない場合です。" +"(例: ファイアーウォールでポートが塞がれている、到達不可能な LAN の IP で動い" +"ている)" + +#: ../../admin/reverse-proxy-http-redirects.en.rst:236 +msgid "" +"Both map and reverse-map rules consist of a **target** (origin) URL and a " +"**replacement** (destination) URL. In a **map rule**, the target URL points " +"to Traffic Server and the replacement URL specifies where the original " +"content is located. In a **reverse-map rule**, the target URL specifies " +"where the original content is located and the replacement URL points to " +"Traffic Server. Traffic Server stores mapping rules in the ``remap.config`` " +"file located in the Traffic Server ``config`` directory." 
+msgstr ""
+"マップルールとリバースマップルールはどちらも\\ **ターゲット** (オリジン) "
+"URL と\\ **置換**\\ (宛先) URL で構成されます。\\ **マップルール**\\ では、"
+"ターゲット URL は Traffic Server を指し、置換 URL はオリジナルコンテントがあ"
+"る場所を指しています。\\ **リバースマップルール**\\ では、ターゲット URL は"
+"オリジナルコンテントがある場所を指し、置換 URL は Traffic Server を指してい"
+"ます。 Traffic Server はマッピングルールを Traffic Server の ``config`` ディ"
+"レクトリにある ``remap.config`` に保存します。"
+
+#: ../../admin/reverse-proxy-http-redirects.en.rst:246
+msgid "Creating Mapping Rules for HTTP Requests"
+msgstr "HTTP リクエスト用マッピングルールの作成"
+
+#: ../../admin/reverse-proxy-http-redirects.en.rst:248
+msgid "To create mapping rules"
+msgstr "マッピングルールを作成するには"
+
+#: ../../admin/reverse-proxy-http-redirects.en.rst:250
+msgid "Enter the map and reverse-map rules into the :file:`remap.config` file"
+msgstr ""
+":file:`remap.config` ファイルにマップルールとリバースマップルールを入力して"
+"ください。"
+
+#: ../../admin/reverse-proxy-http-redirects.en.rst:251
+#: ../../admin/reverse-proxy-http-redirects.en.rst:263
+#: ../../admin/reverse-proxy-http-redirects.en.rst:306
+msgid ""
+"Run the command :option:`traffic_line -x` to apply the configuration "
+"changes."
+msgstr ""
+"設定の変更を適用するために :option:`traffic_line -x` コマンドを実行してくだ"
+"さい。"
+
+#: ../../admin/reverse-proxy-http-redirects.en.rst:255
+msgid "Enabling HTTP Reverse Proxy"
+msgstr "HTTP リバースプロキシーの有効化"
+
+#: ../../admin/reverse-proxy-http-redirects.en.rst:257
+msgid "To enable HTTP reverse proxy, follow the steps below."
+msgstr "HTTP リバースプロキシーを有効にするには次のステップに従ってください。" + +#: ../../admin/reverse-proxy-http-redirects.en.rst:259 +msgid "Edit the following variable in :file:`records.config`" +msgstr ":file:`records.config` にある次の変数を編集してください。" + +#: ../../admin/reverse-proxy-http-redirects.en.rst:261 +msgid ":ts:cv:`proxy.config.reverse_proxy.enabled`" +msgstr ":ts:cv:`proxy.config.reverse_proxy.enabled`" + +#: ../../admin/reverse-proxy-http-redirects.en.rst:267 +msgid "Setting Optional HTTP Reverse Proxy Options" +msgstr "省略可能な HTTP リバースプロキシーオプションの設定" + +#: ../../admin/reverse-proxy-http-redirects.en.rst:269 +msgid "" +"Traffic Server provides several reverse proxy configuration options in :" +"file:`records.config` that enable you to:" +msgstr "" +"Traffic Server は :file:`records.config` でいくつかのリバースプロキシー設定" +"オプションを提供し次のことを可能にします。" + +#: ../../admin/reverse-proxy-http-redirects.en.rst:272 +msgid "" +"Configure Traffic Server to retain the client host header information in a " +"request during translation (:ts:cv:`proxy.config.url_remap." +"pristine_host_hdr`)" +msgstr "" +"Traffic Server がリクエストを変換する際にホストヘッダーの情報を維持するよう" +"に設定する (:ts:cv:`proxy.config.url_remap.pristine_host_hdr`)" + +#: ../../admin/reverse-proxy-http-redirects.en.rst:275 +msgid "" +"Configure Traffic Server to serve requests only to the origin servers " +"listed in the mapping rules. As a result, requests to origin servers not " +"listed in the mapping rules are not served. (:ts:cv:`proxy.config.url_remap." +"remap_required`)" +msgstr "" +"Traffic Server がマッピングルールのリストに存在するオリジンサーバーへのリク" +"エストのみに応えるように設定する。結果として、リストに存在しないオリジンサー" +"バーへのリクエストは処理されません。(:ts:cv:`proxy.config.url_remap." +"remap_required`)" + +#: ../../admin/reverse-proxy-http-redirects.en.rst:278 +msgid "" +"Specify an alternate URL to which incoming requests from older clients (i." +"e., ones that do not provide ``Host`` headers) are directed (:ts:cv:`proxy." 
+"config.header.parse.no_host_url_redirect`)"
+msgstr ""
+"古めのクライアントから届くリクエスト (例 ``Host`` ヘッダーを含まないもの) の"
+"リダイレクト先となる代替 URL を指定する(:ts:cv:`proxy.config.header.parse."
+"no_host_url_redirect`)"
+
+#: ../../admin/reverse-proxy-http-redirects.en.rst:282
+msgid ""
+"Don't forget to run the command :option:`traffic_line -x` to apply the "
+"configuration changes."
+msgstr ""
+"設定の変更を適用するために :option:`traffic_line -x` コマンドを実行するのを"
+"忘れないでください。"
+
+#: ../../admin/reverse-proxy-http-redirects.en.rst:286
+msgid "Redirecting HTTP Requests"
+msgstr "HTTP リクエストのリダイレクト"
+
+#: ../../admin/reverse-proxy-http-redirects.en.rst:288
+msgid ""
+"You can configure Traffic Server to redirect HTTP requests without having "
+"to contact any origin servers. For example, if you redirect all requests "
+"for ``http://www.ultraseek.com`` to ``http://www.server1.com/products/"
+"portal/search/``, then all HTTP requests for ``www.ultraseek.com`` go "
+"directly to ``www.server1.com/products/portal/search``."
+msgstr ""
+"Traffic Server をどのオリジンサーバーにもコンタクトさせることなく HTTP リク"
+"エストをリダイレクトするように設定できます。例えば、``http://www.ultraseek."
+"com`` へのすべてのリクエストを ``http://www.server1.com/products/portal/"
+"search/`` にリダイレクトする場合はすべての ``www.ultraseek.com`` への HTTP "
+"リクエストは直接 ``www.server1.com/products/portal/search`` に向かいます。"
+
+#: ../../admin/reverse-proxy-http-redirects.en.rst:295
+msgid ""
+"You can configure Traffic Server to perform permanent or temporary "
+"redirects. **Permanent redirects** notify the browser of the URL change (by "
+"returning the HTTP status code **``301``**) so that the browser can update "
+"bookmarks. **Temporary redirects** notify the browser of the URL change for "
+"the current request only (by returning the HTTP status code **``307``** )."
+msgstr "" +"Traffic Server を恒久的または一時的なリダイレクトを行うように設定できま" +"す。\\ **恒久的なリダイレクト**\\ はブラウザーがブックマークを更新できるよう" +"にURL の変更を(HTTP ステータスコード **``301``** を返すことで) ブラウザーに" +"通知します。\\ **一時的なリダイレクト**\\ は今回のリクエストに限った URL の" +"変更を(HTTP ステータスコード **``307``** を返すことで)ブラウザーに通知しま" +"す。" + +#: ../../admin/reverse-proxy-http-redirects.en.rst:302 +msgid "To set redirect rules" +msgstr "リダイレクトルールをセットするには" + +#: ../../admin/reverse-proxy-http-redirects.en.rst:304 +msgid "" +"For each redirect you want to set enter a mapping rule in the :file:`remap." +"config` file" +msgstr "" +"各リダイレクトごとに :file:`remap.config` ファイルにマッピングルールを入力し" +"てください。" + +#: ../../admin/reverse-proxy-http-redirects.en.rst:310 +msgid "Example" +msgstr "例" + +#: ../../admin/reverse-proxy-http-redirects.en.rst:312 +msgid "" +"The following permanently redirects all HTTP requests for ``www.server1." +"com`` to ``www.server2.com``: ::" +msgstr "" +"次の設定は ``www.server1.com`` へのすべての HTTP リクエストを恒久的に ``www." +"server2.com`` へリダイレクトします。 ::" diff --git a/doc/locale/ja/LC_MESSAGES/admin/security-options.en.po b/doc/locale/ja/LC_MESSAGES/admin/security-options.en.po new file mode 100644 index 00000000..a099dfcf --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/admin/security-options.en.po @@ -0,0 +1,493 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-11-08 17:57+0900\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../admin/security-options.en.rst:4 +msgid "Security Options" +msgstr "セキュリティオプション" + +#: ../../admin/security-options.en.rst:23 +msgid "Traffic Server provides a number of security features." +msgstr "Traffic Server はいくつものセキュリティ機能を提供します。" + +#: ../../admin/security-options.en.rst:28 +msgid "Controlling Client Access to the Proxy Cache" +msgstr "クライアントからプロキシーキャッシュへのアクセスの制御" + +#: ../../admin/security-options.en.rst:30 +msgid "" +"You can configure Traffic Server to allow only certain clients to use the " +"proxy cache by editing a configuration file." +msgstr "" +"設定ファイルを編集することで、信頼できるクライアントのみにプロキシーキャッ" +"シュの使用を許すように Traffic Server を設定することができます。" + +#: ../../admin/security-options.en.rst:33 +msgid "" +"Add a line in :file:`ip_allow.config` for each IP address or range of IP " +"addresses allowed to access Traffic Server." +msgstr "" +":file:`ip_allow.config` に Traffic Server へのアクセスを許す個々の IP アドレ" +"スもしくは IP アドレスの範囲を 1 行書き足してください。" + +#: ../../admin/security-options.en.rst:35 +#: ../../admin/security-options.en.rst:63 +msgid "" +"Run the command :option:`traffic_line -x` to apply the configuration " +"changes." +msgstr ":option:`traffic_line -x` を実行して設定の変更を適用してください。" + +#: ../../admin/security-options.en.rst:41 +msgid "Configuring DNS Server Selection (Split DNS)" +msgstr "DNS サーバー選択 (Split DNS) の設定" + +#: ../../admin/security-options.en.rst:43 +msgid "" +"The **Split DNS** option enables you to configure Traffic Server to use " +"multiple DNS servers, as dictated by your security requirements. 
For " +"example, you might configure Traffic Server to use one set of DNS servers " +"to resolve hostnames on your internal network, while allowing DNS servers " +"outside the firewall to resolve hosts on the Internet. This maintains the " +"security of your intranet, while continuing to provide direct access to " +"sites outside your organization." +msgstr "" +"**Split DNS** オプションはセキュリティ要件に規定されたとおりに Traffic " +"Server が複数の DNS サーバーを使用するように設定することを可能にします。例え" +"ば、Traffic Server が内部ネットワークのホスト名を解決するために 1 組の DNS " +"サーバーを使用する一方で、ファイアーウォールの外にある DNS サーバーにはイン" +"ターネット上のホストの解決を許すように設定してもよいでしょう。これはイントラ" +"ネットのセキュリティを維持すると同時に、組織外のサイトへの直接アクセスも提供" +"し続けます。" + +#: ../../admin/security-options.en.rst:51 +msgid "To configure Split DNS, you must do the following:" +msgstr "Split DNS を設定するには次のことをしなければなりません" + +#: ../../admin/security-options.en.rst:53 +msgid "" +"Specify the rules for performing DNS server selection based on the " +"destination domain, the destination host, or a URL regular expression." +msgstr "" +"DNS サーバーが宛先ドメイン、宛先ホストもしくは URL の正規表現をもとに動作す" +"るためのルールを指定する" + +#: ../../admin/security-options.en.rst:56 +msgid "Enable the **Split DNS** option." +msgstr "**Split DNS** オプションを有効化する" + +#: ../../admin/security-options.en.rst:58 +msgid "To do this, we" +msgstr "これを行うためには" + +#: ../../admin/security-options.en.rst:60 +msgid "Add rules to :file:`splitdns.config`." +msgstr ":file:`splitdns.config` にルールを追加してください。" + +#: ../../admin/security-options.en.rst:61 +msgid "" +"In :file:`records.config` set the variable :ts:cv:`proxy.config.dns." +"splitDNS.enabled` to ``1`` to enable split DNS." +msgstr "" +"Split DNS を有効にするために :file:`records.config` の:ts:cv:`proxy.config." 
+"dns.splitDNS.enabled` 変数を ``1`` に設定してください。"
+
+#: ../../admin/security-options.en.rst:69
+msgid "Using SSL Termination"
+msgstr "SSL ターミネーションの使用"
+
+#: ../../admin/security-options.en.rst:71
+msgid ""
+"The Traffic Server **SSL termination** option enables you to secure "
+"connections in reverse proxy mode between a client and a Traffic Server and/"
+"or Traffic Server and an origin server."
+msgstr ""
+"Traffic Server の **SSL ターミネーション**\\ オプションはリバースプロキシー"
+"モードでクライアントと Traffic Server の間および/もしくは Traffic Server と"
+"オリジンサーバーの間の接続を安全にすることを可能にします。"
+
+#: ../../admin/security-options.en.rst:75
+msgid ""
+"The following sections describe how to enable and configure the SSL "
+"termination option."
+msgstr ""
+"次の章では SSL ターミネーションをどのように設定し有効化するかを説明しま"
+"す。"
+
+#: ../../admin/security-options.en.rst:78
+msgid ""
+"Enable and configure SSL termination for client/Traffic Server "
+"connections: :ref:`client-and-traffic-server-connections`"
+msgstr ""
+"クライアントと Traffic Server 間の接続で SSL ターミネーションを設定し有効化"
+"するには :ref:`client-and-traffic-server-connections` を参照してください。"
+
+#: ../../admin/security-options.en.rst:80
+msgid ""
+"Enable and configure SSL termination for Traffic Server/origin server "
+"connections: :ref:`traffic-server-and-origin-server-connections`"
+msgstr ""
+"Traffic Server とオリジンサーバー間の接続で SSL ターミネーションを設定し有効"
+"化するには :ref:`traffic-server-and-origin-server-connections` を参照してく"
+"ださい。"
+
+#: ../../admin/security-options.en.rst:82
+msgid ""
+"Enable and configure SSL termination for both client/Traffic Server and "
+"Traffic Server/origin server connections: :ref:`client-and-traffic-server-"
+"connections` :ref:`traffic-server-and-origin-server-connections`, "
+"respectively."
+msgstr "" +"クライアントと Traffic Server 間の接続、Traffic Server とオリジンサーバー間" +"の接続の両方で SSL ターミネーションを設定し有効化するには :ref:`client-and-" +"traffic-server-connections` と :ref:`traffic-server-and-origin-server-" +"connections` をそれぞれ参照してください。" + +#: ../../admin/security-options.en.rst:89 +msgid "Client and Traffic Server Connections" +msgstr "クライアントと Traffic Server の接続" + +#: ../../admin/security-options.en.rst:91 +msgid "" +"The figure below illustrates communication between a client and Traffic " +"Server (and between Traffic Server and an origin server) when the SSL " +"termination option is enabled & configured for **client/Traffic Server " +"connections only**." +msgstr "" +"以下の図は\\ **クライアントと Traffic Server の間の接続だけ**\\ で SSL ター" +"ミネーションオプションを有効化し、設定した際のクライアントと Traffic Server " +"の通信 (Traffic Server とオリジンサーバー間も含む) を説明します。" + +#: ../../admin/security-options.en.rst:100 +msgid "Client and Traffic Server communication using SSL termination" +msgstr "SSL ターミネーションを使用したクライアントと Traffic Server の通信" + +#: ../../admin/security-options.en.rst:102 +#: ../../admin/security-options.en.rst:167 +msgid "The figure above depicts the following:" +msgstr "上の図は次のことを説明しています" + +#: ../../admin/security-options.en.rst:104 +msgid "" +"The client sends an HTTPS request for content. Traffic Server receives the " +"request and performs the SSL 'handshake' to authenticate the client " +"(depending on the authentication options configured) and determine the " +"encryption method that will be used. If the client is allowed access, then " +"Traffic Server checks its cache for the requested content." +msgstr "" +"クライアントがコンテンツのために HTTPS リクエストを送信します。Traffic " +"Server はリクエストを受け取り、クライアントの認証 (認証オプションの設定によ" +"ります) と使用される暗号化方式の決定をするために SSL 'ハンドシェイク' を行い" +"ます。クライアントがアクセスを許されると、Traffic Server はリクエストされた" +"コンテンツのキャッシュを確認します。" + +#: ../../admin/security-options.en.rst:106 +msgid "" +"If the request is a cache hit and the content is fresh, then Traffic Server " +"encrypts the content and sends it to the client. 
The client decrypts the " +"content (using the method determined during the handshake) and displays it." +msgstr "" +"リクエストがキャッシュヒットかつコンテンツが新鮮な場合、Traffic Server はコ" +"ンテンツを暗号化しクライアントに送信します。クライアントはコンテンツを (ハン" +"ドシェイクの間に決定した方式で) 復号化し表示します。" + +#: ../../admin/security-options.en.rst:108 +msgid "" +"If the request is a cache miss or cached content is stale, then Traffic " +"Server communicates with the origin server via HTTP and obtains a plain " +"text version of the content. Traffic Server saves the plain text version of " +"the content in its cache, encrypts the content, and sends it to the client. " +"The client decrypts and displays the content." +msgstr "" +"リクエストがキャッシュミスもしくはキャッシュされたコンテンツが新鮮でない場" +"合、Traffic Server はオリジンサーバーと HTTP で通信し平文のコンテンツを取得" +"します。Traffic Server は平文のコンテンツをキャッシュに保存し、コンテンツを" +"暗号化し、クライアントに送信します。クライアントはコンテンツを復号化し表示し" +"ます。" + +#: ../../admin/security-options.en.rst:110 +msgid "" +"To configure Traffic Server to use the SSL termination option for client/" +"Traffic Server connections, you must do the following:" +msgstr "" +"Traffic Server がクライアントと Traffic Server の間の接続で SSL ターミネー" +"ションオプションを使用するように設定するには、次のことを行わなければなりませ" +"ん" + +#: ../../admin/security-options.en.rst:113 +msgid "" +"Obtain and install an SSL server certificate from a recognized certificate " +"authority. The SSL server certificate contains information that enables the " +"client to authenticate Traffic Server and exchange encryption keys." +msgstr "" +"SSL サーバー証明書を認められた認証局から取得しインストールする。SSL サーバー" +"証明書はクライアントが Traffic Server を認証し暗号化鍵を交換することを可能に" +"する情報を含んでいます。" + +#: ../../admin/security-options.en.rst:117 +#: ../../admin/security-options.en.rst:194 +msgid "Configure SSL termination options:" +msgstr "SSL ターミネーションオプションを設定する" + +#: ../../admin/security-options.en.rst:119 +msgid "" +"Set the port number used for SSL communication using :ts:cv:`proxy.config." +"http.server_ports`." 
+msgstr "" +":ts:cv:`proxy.config.http.server_ports` で SSL 通信に使用されるポート番号を" +"設定する" + +#: ../../admin/security-options.en.rst:122 +msgid "" +"(Optional) Configure the use of client certificates: Client certificates " +"are located on the client. If you configure Traffic Server to require " +"client certificates, then Traffic Server verifies the client certificate " +"during the SSL handshake that authenticates the client. If you configure " +"Traffic Server to *not* require client certificates, then access to Traffic " +"Server is managed through other Traffic Server options that have been set " +"(such as rules in :file:`ip_allow.config`)." +msgstr "" +"(任意) クライアント証明書の使用を設定する。クライアント証明書はクライアント" +"上に置かれます。Traffic Server がクライアント証明書を必要とするように設定す" +"ると Traffic Server は クライアントを認証する SSL ハンドシェイクの中でクライ" +"アント証明書を検証します。Traffic Server がクライアント証明書を必要と\\ *し" +"ない*\\ ように設定すると、Traffic Server へのアクセスは (:file:`ip_allow." +"config` のルールなどの) 他に設定されている Traffic Server オプションによって" +"管理されます。" + +#: ../../admin/security-options.en.rst:130 +msgid "" +"(Optional) Configure the use of Certification Authorities (CAs). CAs add " +"security by verifying the identity of the person requesting a certificate." 
+msgstr ""
+"(任意) 認証局 (CA) の使用を設定する。認証局は証明書を要求した人の同一性を検証"
+"することでセキュリティを追加します。"
+
+#: ../../admin/security-options.en.rst:134
+msgid "In order to accomplish this, we"
+msgstr "これを実現するためには"
+
+#: ../../admin/security-options.en.rst:136
+msgid ""
+"Edit the following variables in the :ref:`records-config-ssl-termination` "
+"section of :file:`records.config`"
+msgstr ""
+":file:`records.config` の :ref:`records-config-ssl-termination` の章にある次"
+"の変数を編集してください。"
+
+#: ../../admin/security-options.en.rst:139
+#: ../../admin/security-options.en.rst:217
+msgid ":ts:cv:`proxy.config.http.server_ports`"
+msgstr ":ts:cv:`proxy.config.http.server_ports`"
+
+#: ../../admin/security-options.en.rst:140
+msgid ":ts:cv:`proxy.config.ssl.client.certification_level`"
+msgstr ":ts:cv:`proxy.config.ssl.client.certification_level`"
+
+#: ../../admin/security-options.en.rst:141
+msgid ":ts:cv:`proxy.config.ssl.server.cert.path`"
+msgstr ":ts:cv:`proxy.config.ssl.server.cert.path`"
+
+#: ../../admin/security-options.en.rst:142
+msgid ":ts:cv:`proxy.config.ssl.server.private_key.path`"
+msgstr ":ts:cv:`proxy.config.ssl.server.private_key.path`"
+
+#: ../../admin/security-options.en.rst:143
+msgid ":ts:cv:`proxy.config.ssl.CA.cert.path`"
+msgstr ":ts:cv:`proxy.config.ssl.CA.cert.path`"
+
+#: ../../admin/security-options.en.rst:145
+#: ../../admin/security-options.en.rst:226
+msgid ""
+"Run the command :option:`traffic_line -L` to restart Traffic Server on the "
+"local node or :option:`traffic_line -M` to restart Traffic Server on all "
+"the nodes in a cluster."
+msgstr "" +"ローカルノード上の Traffic Server をリスタートするために :option:" +"`traffic_line -L` を、もしくはクラスター内のすべての Traffic Server をリス" +"タートするためには :option:`traffic_line -M` を実行してください。" + +#: ../../admin/security-options.en.rst:155 +msgid "Traffic Server and Origin Server Connections" +msgstr "Traffic Server とオリジンサーバーの接続" + +#: ../../admin/security-options.en.rst:157 +msgid "" +"The figure below illustrates communication between Traffic Server and an " +"origin server when the SSL termination option is enabled for **Traffic " +"Server/origin server connections**." +msgstr "" +"以下の図は SSL ターミネーションオプションを \\ **Traffic Server とオリジン" +"サーバーの間の接続**\\ で有効化した際の Traffic Server とオリジンサーバーの" +"通信を説明します。" + +#: ../../admin/security-options.en.rst:165 +msgid "Traffic Server and origin server communication using SSL termination" +msgstr "SSL ターミネーションを使用したTraffic Server とオリジンサーバーの通信" + +#: ../../admin/security-options.en.rst:169 +msgid "" +"**Step 1:** If a client request is a cache miss or is stale, then Traffic " +"Server sends an HTTPS request for the content to the origin server. The " +"origin server receives the request and performs the SSL handshake to " +"authenticate Traffic Server and determine the encryption method to be used." +msgstr "" +"**ステップ 1:** クライアントのリクエストがキャッシュミスもしくは新鮮でない場" +"合、Traffic Server は コンテンツのための HTTPS リクエストをオリジンサーバー" +"に送信します。オリジンサーバーはリクエストを受け取り、Traffic Server の認証" +"と使用される暗号化方式の決定をするために SSL ハンドシェイク を行います。" + +#: ../../admin/security-options.en.rst:175 +msgid "" +"**Step 2:** If Traffic Server is allowed access, then the origin server " +"encrypts the content and sends it to Traffic Server, where it is decrypted " +"(using the method determined during the handshake). A plain text version of " +"the content is saved in the cache." 
+msgstr "" +"**ステップ 2:** Traffic Server がアクセスを許されると、オリジンサーバーはコ" +"ンテンツを暗号化して Traffic Server に送信し、そこで (ハンドシェイクの中で決" +"定した方式を使用して) 復号化されます。平文のコンテンツはキャッシュに保存され" +"ます。" + +#: ../../admin/security-options.en.rst:180 +msgid "" +"**Step 3:** If SSL termination is enabled for client /Traffic Server " +"connections, then Traffic Server re-encrypts the content and sends it to " +"the client via HTTPS, where it is decrypted and displayed. If SSL " +"termination is not enabled for client/Traffic Server connections, then " +"Traffic Server sends the plain text version of the content to the client " +"via HTTP." +msgstr "" +"**ステップ 3:** SSL ターミネーションがクライアントと Traffic Server の間で有" +"効化されている場合、Traffic Server はコンテンツを再暗号化して HTTPS でクライ" +"アントに送信し、そこで復号化され表示されます。SSL ターミネーションがクライア" +"ントと Traffic Server の間で有効化されていない場合、Traffic Server は平文の" +"コンテンツを HTTP でクライアントに送信します。" + +#: ../../admin/security-options.en.rst:187 +msgid "" +"To configure Traffic Server to use the SSL termination option for Traffic " +"Server and origin server connections, you must do the following:" +msgstr "" +"Traffic Server が Traffic Server とオリジンサーバーの間で SSL ターミネーショ" +"ンを使用するように設定するには、次のことを行わなければなりません" + +#: ../../admin/security-options.en.rst:190 +msgid "" +"Obtain and install an SSL client certificate from a recognized certificate " +"authority. The SSL client certificate contains information that allows the " +"origin server to authenticate Traffic Server (the client certificate is " +"optional)." +msgstr "" +"SSL クライアント証明書を認められた認証局から取得しインストールする。SSL クラ" +"イアント証明書はオリジンサーバーが Traffic Server を認証することを可能にする" +"情報を含んでいます (クライアント証明書は任意です) 。" + +#: ../../admin/security-options.en.rst:195 +msgid "Enable the SSL termination option." +msgstr "SSL ターミネーションオプションを有効化する" + +#: ../../admin/security-options.en.rst:197 +msgid "Set the port number used for SSL communication." 
+msgstr "SSL 通信に使用されるポート番号を設定する。" + +#: ../../admin/security-options.en.rst:198 +msgid "" +"Specify the filename and location of the SSL client certificate (if you " +"choose to use a client certificate)." +msgstr "" +"SSL クライアント証明書のファイル名と場所を指定する (クライアント証明書を使用" +"することを選択する場合) 。" + +#: ../../admin/security-options.en.rst:200 +msgid "" +"Specify the filename and location of the Traffic Server private key (if the " +"private key is not located in the client certificate file). Traffic Server " +"uses its private key during the SSL handshake to decrypt the session " +"encryption keys. The private key must be stored and protected against theft." +msgstr "" +"Traffic Server の秘密鍵のファイル名と場所を指定する (秘密鍵がクライアント証" +"明書ファイル内に無い場合) 。Traffic Server はセッション暗号化鍵を復号化する" +"ために SSL ハンドシェイクの中で自身の秘密鍵を使用します。秘密鍵は盗難から保" +"護されて置かれていなければなりません。" + +#: ../../admin/security-options.en.rst:205 +msgid "" +"Configure the use of CAs. CAs allow the Traffic Server that's acting as a " +"client to verify the identity of the server with which it is communicating, " +"thereby enabling exchange of encryption keys." 
+msgstr ""
+"CA の使用を設定する。CA はクライアントとして振る舞う Traffic Server が通信"
+"相手のサーバーの同一性を検証することを可能にし、それによって暗号化鍵の交換"
+"を可能にします。"
+
+#: ../../admin/security-options.en.rst:209
+msgid "In order to accomplish this, we:"
+msgstr "これを実現するためには"
+
+#: ../../admin/security-options.en.rst:213
+msgid ""
+"Edit the following variables in the :ref:`records-config-ssl-termination` "
+"section of :file:`records.config`:"
+msgstr ""
+":file:`records.config` の :ref:`records-config-ssl-termination` の章にある"
+"次の変数を編集してください。"
+
+#: ../../admin/security-options.en.rst:218
+msgid ":ts:cv:`proxy.config.ssl.client.verify.server`"
+msgstr ":ts:cv:`proxy.config.ssl.client.verify.server`"
+
+#: ../../admin/security-options.en.rst:219
+msgid ":ts:cv:`proxy.config.ssl.client.cert.filename`"
+msgstr ":ts:cv:`proxy.config.ssl.client.cert.filename`"
+
+#: ../../admin/security-options.en.rst:220
+msgid ":ts:cv:`proxy.config.ssl.client.cert.path`"
+msgstr ":ts:cv:`proxy.config.ssl.client.cert.path`"
+
+#: ../../admin/security-options.en.rst:221
+msgid ":ts:cv:`proxy.config.ssl.client.private_key.filename`"
+msgstr ":ts:cv:`proxy.config.ssl.client.private_key.filename`"
+
+#: ../../admin/security-options.en.rst:222
+msgid ":ts:cv:`proxy.config.ssl.client.private_key.path`"
+msgstr ":ts:cv:`proxy.config.ssl.client.private_key.path`"
+
+#: ../../admin/security-options.en.rst:223
+msgid ":ts:cv:`proxy.config.ssl.client.CA.cert.filename`"
+msgstr ":ts:cv:`proxy.config.ssl.client.CA.cert.filename`"
+
+#: ../../admin/security-options.en.rst:224
+msgid ":ts:cv:`proxy.config.ssl.client.CA.cert.path`"
+msgstr ":ts:cv:`proxy.config.ssl.client.CA.cert.path`"
+
+#: ../../admin/security-options.en.rst:120
+msgid ""
+"Edit :file:`ssl_multicert.config` to specify the filename and location of "
+"the SSL certificates and private keys."
+msgstr ""
+"SSL 証明書と秘密鍵のファイル名と場所を指定するために :file:`ssl_multicert."
+"config` を編集する" diff --git a/doc/locale/ja/LC_MESSAGES/admin/traffic-server-error-messages.en.po b/doc/locale/ja/LC_MESSAGES/admin/traffic-server-error-messages.en.po new file mode 100644 index 00000000..8e907c5c --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/admin/traffic-server-error-messages.en.po @@ -0,0 +1,872 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-11-08 17:57+0900\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../admin/traffic-server-error-messages.en.rst:4 +msgid "Error Messages" +msgstr "エラーメッセージ" + +#: ../../admin/traffic-server-error-messages.en.rst:27 +msgid "Traffic Server Error Messages" +msgstr "Traffic Server のエラーメッセージ" + +#: ../../admin/traffic-server-error-messages.en.rst:29 +msgid "" +"The following table lists messages that can appear in system log files. " +"This list is not exhaustive; it simply describes common warning messages " +"that can occur and which might require your attention." 
+msgstr "" +"次の表はシステムログファイルに出てくるメッセージを一覧表示しています。この一" +"覧は網羅的なものではなく、単に一般的な発生しうる注目を要するであろう警告メッ" +"セージを説明しているだけです。" + +#: ../../admin/traffic-server-error-messages.en.rst:34 +msgid "Traffic Server Process Fatal" +msgstr "Traffic Server プロセスの致命的状況" + +#: ../../admin/traffic-server-error-messages.en.rst:38 +msgid "``Accept port is not between 1 and 65535. Please check configuration``" +msgstr "" +"``Accept port is not between 1 and 65535. Please check configuration``" + +#: ../../admin/traffic-server-error-messages.en.rst:37 +msgid "" +"The port specified in the :file:`records.config` file that accepts incoming " +"HTTP requests is not valid." +msgstr "" +":file:`records.config` ファイルに指定されている入ってきた HTTP リクエストを" +"受け入れるポートが妥当ではありません。" + +#: ../../admin/traffic-server-error-messages.en.rst:43 +msgid "``Self loop is detected in parent proxy configuration``" +msgstr "``Self loop is detected in parent proxy configuration``" + +#: ../../admin/traffic-server-error-messages.en.rst:41 +msgid "" +"The name and port of the parent proxy match that of Traffic Server. This " +"creates a loop when Traffic Server attempts to send the request to the " +"parent proxy." +msgstr "" +"親プロキシーの名前とポートが Traffic Server のものと一致しています。これは " +"Traffic Server が親プロキシーにリクエストを送信しようとする際にループを引き" +"起こします。" + +#: ../../admin/traffic-server-error-messages.en.rst:46 +msgid "Traffic Server Warnings" +msgstr "Traffic Server の警告" + +#: ../../admin/traffic-server-error-messages.en.rst:49 +msgid "`` error: error_number``" +msgstr "`` error: error_number``" + +#: ../../admin/traffic-server-error-messages.en.rst:49 +msgid "Generic logging error." +msgstr "一般的なロギングエラーです。" + +#: ../../admin/traffic-server-error-messages.en.rst:52 +msgid "" +"``Bad cluster major version range for node " +"connect failed``" +msgstr "" +"``Bad cluster major version range for node " +"connect failed``" + +#: ../../admin/traffic-server-error-messages.en.rst:52 +msgid "Incompatible software versions causing a problem." 
+msgstr "互換性の無いソフトウェアのバージョンが問題を引き起こしています。" + +#: ../../admin/traffic-server-error-messages.en.rst:56 +msgid "``Connect by disallowed client , closing``" +msgstr "``Connect by disallowed client , closing``" + +#: ../../admin/traffic-server-error-messages.en.rst:55 +msgid "" +"The specified client is not allowed to connect to Traffic Server; the " +"client IP address is not listed in the ``ip_allow.config`` file." +msgstr "" +"指定されたクライアントは Traffic Server へのアクセスを許可されていません。ク" +"ライアントの IP アドレスが ``ip_allow.config`` ファイル内に記載されていませ" +"ん。" + +#: ../../admin/traffic-server-error-messages.en.rst:59 +msgid "``Could not rename log to ``" +msgstr "``Could not rename log to ``" + +#: ../../admin/traffic-server-error-messages.en.rst:59 +msgid "System error when renaming log file during roll." +msgstr "ロール中にログファイルをリネームしようとした際のシステムエラーです。" + +#: ../../admin/traffic-server-error-messages.en.rst:62 +msgid "``Did of backup; still to do ``" +msgstr "``Did of backup; still to do ``" + +#: ../../admin/traffic-server-error-messages.en.rst:62 +msgid "Congestion is approaching." +msgstr "輻輳が発生しそうです。" + +#: ../../admin/traffic-server-error-messages.en.rst:65 +msgid "" +"``Different clustering minor versions for node continuing``" +msgstr "" +"``Different clustering minor versions for node continuing``" + +#: ../../admin/traffic-server-error-messages.en.rst:65 +msgid "Incompatible software versions are causing a problem." +msgstr "互換性の無いソフトウェアのバージョンが問題を引き起こしています。" + +#: ../../admin/traffic-server-error-messages.en.rst:69 +msgid "``Log format symbol not found``" +msgstr "``Log format symbol not found``" + +#: ../../admin/traffic-server-error-messages.en.rst:68 +msgid "" +"Custom log format references a field symbol that does not exist. Refer to :" +"ref:`event-logging-formats`." 
+msgstr "" +"カスタムログのフォーマットが存在しないフィールドを参照しています。 :ref:" +"`event-logging-formats` を参照してください。" + +#: ../../admin/traffic-server-error-messages.en.rst:72 +msgid "``Missing field for field marker``" +msgstr "``Missing field for field marker``" + +#: ../../admin/traffic-server-error-messages.en.rst:72 +msgid "Error reading a log buffer." +msgstr "ログバッファーを読み込み中のエラーです。" + +#: ../../admin/traffic-server-error-messages.en.rst:75 +msgid "``Unable to open log file , errno=``" +msgstr "``Unable to open log file , errno=``" + +#: ../../admin/traffic-server-error-messages.en.rst:75 +msgid "Cannot open the log file." +msgstr "ログファイルが開けません。" + +#: ../../admin/traffic-server-error-messages.en.rst:79 +msgid "``Error accessing disk ``" +msgstr "``Error accessing disk ``" + +#: ../../admin/traffic-server-error-messages.en.rst:78 +msgid "" +"Traffic Server might have a cache read problem. You might need to replace " +"the disk." +msgstr "" +"Traffic Server がキャッシュの読み込みに問題を抱えているかもしれません。ディ" +"スクを交換する必要があるかもしれません。" + +#: ../../admin/traffic-server-error-messages.en.rst:84 +msgid "``Too many errors accessing disk : declaring disk bad``" +msgstr "``Too many errors accessing disk : declaring disk bad``" + +#: ../../admin/traffic-server-error-messages.en.rst:82 +msgid "" +"Traffic Server is not using the cache disk because it encountered too many " +"errors. The disk might be corrupt and might have to be replaced." +msgstr "" +"Traffic Server は過度なエラーに遭遇したためキャッシュディスクを使用していま" +"せん。ディスクが壊れているかもしれないので交換する必要があるかもしれません。" + +#: ../../admin/traffic-server-error-messages.en.rst:89 +msgid "``No cache disks specified in storage.config file: cache disabled``" +msgstr "``No cache disks specified in storage.config file: cache disabled``" + +#: ../../admin/traffic-server-error-messages.en.rst:87 +msgid "" +"The Traffic Server ``storage.config`` file does not list any cache disks; " +"Traffic Server is running in proxy-only mode. 
You must add the disks you "
+"want to use for the cache to :file:`storage.config`."
+msgstr ""
+"Traffic Server の ``storage.config`` ファイルにキャッシュディスクが記載され"
+"ていません。Traffic Server はプロキシー専用モードで動作しています。キャッ"
+"シュに使用したいディスクを :file:`storage.config` に記載しなくてはなりませ"
+"ん。"
+
+#: ../../admin/traffic-server-error-messages.en.rst:92
+msgid "Traffic Server Alarm Messages"
+msgstr "Traffic Server の警報メッセージ"
+
+#: ../../admin/traffic-server-error-messages.en.rst:96
+msgid "``[Rollback::Rollback] Config file is read-only: ``"
+msgstr "``[Rollback::Rollback] Config file is read-only: ``"
+
+#: ../../admin/traffic-server-error-messages.en.rst:95
+#: ../../admin/traffic-server-error-messages.en.rst:103
+msgid ""
+"Go to the Traffic Server ``config`` directory and check the indicated file "
+"permissions; change if necessary."
+msgstr ""
+"Traffic Server の ``config`` ディレクトリで指示されたファイルのパーミッショ"
+"ンを確認し、必要であれば変更してください。"
+
+#: ../../admin/traffic-server-error-messages.en.rst:100
+msgid ""
+"``[Rollback::Rollback] Unable to read or write config file ``"
+msgstr ""
+"``[Rollback::Rollback] Unable to read or write config file ``"
+
+#: ../../admin/traffic-server-error-messages.en.rst:99
+msgid ""
+"Go to the Traffic Server ``config`` directory and make sure the indicated "
+"file exists. Check permissions and modify if necessary."
+msgstr ""
+"Traffic Server の ``config`` ディレクトリで指示されたファイルが存在すること"
+"を確認してください。パーミッションを確認し、必要であれば変更してください。"
+
+#: ../../admin/traffic-server-error-messages.en.rst:104
+msgid "``[Traffic Manager] Configuration File Update Failed: ``"
+msgstr ""
+"``[Traffic Manager] Configuration File Update Failed: ``"
+
+#: ../../admin/traffic-server-error-messages.en.rst:108
+msgid "``[Traffic Manager] Mgmt <==>Proxy conn. closed``"
+msgstr "``[Traffic Manager] Mgmt <==>Proxy conn. closed``"
+
+#: ../../admin/traffic-server-error-messages.en.rst:107
+msgid ""
+"An informational message to inform you that the :program:`traffic_server` "
+"process is down."
+msgstr "" +":program:`traffic_server` プロセスが落ちていることを知らせるための情報提供" +"メッセージです。" + +#: ../../admin/traffic-server-error-messages.en.rst:114 +msgid "``Access logging suspended - configured space allocation exhausted.``" +msgstr "``Access logging suspended - configured space allocation exhausted.``" + +#: ../../admin/traffic-server-error-messages.en.rst:111 +msgid "" +"The space allocated to the event log files is full; you must either " +"increase the space or delete some log files so that access logging to " +"continue. To prevent this error, consider rolling log files more frequently " +"and enabling the autodelete feature." +msgstr "" +"イベントログファイルに割り当てられた領域が満杯です。アクセスのロギングを続け" +"るために領域を増やすかいくつかのログファイルを削除しなければなりません。この" +"エラーを防ぐためには、ログファイルをより頻繁にロールし、自動削除機能を有効化" +"することを検討してください。" + +#: ../../admin/traffic-server-error-messages.en.rst:120 +msgid "``Access logging suspended - no more space on the logging partition.``" +msgstr "" +"``Access logging suspended - no more space on the logging partition.``" + +#: ../../admin/traffic-server-error-messages.en.rst:117 +msgid "" +"The entire partition containing the event logs is full; you must delete or " +"move some log files to enable access logging to continue. To prevent this " +"error, consider rolling log files more frequently and enabling the " +"autodelete feature." +msgstr "" +"イベントログを含むパーティション全体が満杯です。アクセスのロギングを続けるた" +"めにいくつかのログファイルを削除もしくは移動しなければなりません。このエラー" +"を防ぐためには、ログファイルをより頻繁にロールし、自動削除機能を有効化するこ" +"とを検討してください。" + +#: ../../admin/traffic-server-error-messages.en.rst:125 +msgid "``Created zero length place holder for config file ``" +msgstr "``Created zero length place holder for config file ``" + +#: ../../admin/traffic-server-error-messages.en.rst:123 +msgid "" +"Go to the Traffic Server ``config`` directory and check the indicated file. " +"If it is indeed zero in length, then use a backup copy of the configuration " +"file." 
+msgstr "" +"Traffic Server の ``config`` ディレクトリで指示されたファイルを確認してくだ" +"さい。もしたしかに長さがゼロの場合、設定ファイルのバックアップコピーを使用し" +"てください。" + +#: ../../admin/traffic-server-error-messages.en.rst:128 +msgid "``Traffic Server could not open logfile ``" +msgstr "``Traffic Server could not open logfile ``" + +#: ../../admin/traffic-server-error-messages.en.rst:128 +msgid "Check permissions for the indicated file and the logging directory." +msgstr "" +"指示されたファイルとロギングディレクトリのパーミッションを確認してください。" + +#: ../../admin/traffic-server-error-messages.en.rst:132 +msgid "" +"``Traffic Server failed to parse line of the logging config " +"file ``" +msgstr "" +"``Traffic Server failed to parse line of the logging config " +"file ``" + +#: ../../admin/traffic-server-error-messages.en.rst:131 +msgid "" +"Check your custom log configuration file; there could be syntax errors. " +"Refer to :ref:`custom-logging-fields` for correct custom log format fields." +msgstr "" +"カスタムログ設定ファイルを確認してください。シンタックスエラーがあるかもしれ" +"ません。正しいカスタムログのフォーマットフィールドについては :ref:`custom-" +"logging-fields` を参照してください。" + +#: ../../admin/traffic-server-error-messages.en.rst:137 +msgid "" +"``vip_config binary is not setuid root, manager will be unable to enable " +"virtual ip addresses``" +msgstr "" +"``vip_config binary is not setuid root, manager will be unable to enable " +"virtual ip addresses``" + +#: ../../admin/traffic-server-error-messages.en.rst:135 +msgid "" +"The :program:`traffic_manager` process is not able to set virtual IP " +"addresses. You must ``setuid root`` for the ``vip_config`` file in the " +"Traffic Server ``bin`` directory." 
+msgstr ""
+":program:`traffic_manager` プロセスがバーチャル IP アドレスを設定できませ"
+"ん。Traffic Server の ``bin`` ディレクトリにある ``vip_config`` ファイルに対"
+"して ``setuid root`` しなければなりません。"
+
+#: ../../admin/traffic-server-error-messages.en.rst:140
+msgid "HTML Messages Sent to Clients"
+msgstr "クライアントに送信される HTML メッセージ"
+
+#: ../../admin/traffic-server-error-messages.en.rst:142
+msgid ""
+"Traffic Server returns detailed error messages to browser clients when "
+"there are problems with the HTTP transactions requested by the browser. "
+"These Traffic Server response messages correspond to standard HTTP response "
+"codes, but provide more information. A list of the more frequently-"
+"encountered HTTP response codes is provided in :ref:`standard-http-response-"
+"messages`. You can customize the Traffic Server response messages, if "
+"desired."
+msgstr ""
+"Traffic Server はブラウザからリクエストされた HTTP トランザクションに問題が"
+"ある場合に、クライアントに詳細なエラーメッセージを返します。これらの "
+"Traffic Server のレスポンスメッセージは標準的な HTTP レスポンスコードに対応"
+"しますが、より多くの情報を提供します。より頻繁に遭遇する HTTP レスポンスコー"
+"ドの一覧は :ref:`standard-http-response-messages` で提供されています。もし望"
+"むなら Traffic Server のレスポンスメッセージをカスタマイズすることができま"
+"す。"
+
+#: ../../admin/traffic-server-error-messages.en.rst:149
+msgid ""
+"The following table lists the hard-coded Traffic Server HTTP messages, with "
+"corresponding HTTP response codes and customizable files."
+msgstr ""
+"次の表はハードコードされた Traffic Server の HTTP メッセージを、対応する "
+"HTTP レスポンスコードとカスタマイズ可能なファイルとともに一覧表示していま"
+"す。"
+
+#: ../../admin/traffic-server-error-messages.en.rst:155
+msgid "``Access Denied``"
+msgstr "``Access Denied``"
+
+#: ../../admin/traffic-server-error-messages.en.rst:153
+msgid ""
+"``403`` You are not allowed to access the document at location ``URL``. "
+"``access#denied``"
+msgstr ""
+"``403`` You are not allowed to access the document at location ``URL``. 
(あ" +"なたは ``URL`` にあるドキュメントへのアクセスが許されていません。) " +"``access#denied``" + +#: ../../admin/traffic-server-error-messages.en.rst:160 +msgid "``Cache Read Error``" +msgstr "``Cache Read Error``" + +#: ../../admin/traffic-server-error-messages.en.rst:158 +msgid "" +"``500`` Error reading from cache; please retry request. ``cache#read_error``" +msgstr "" +"``500`` Error reading from cache; please retry request. (キャッシュ読み込み" +"中のエラーです。リクエストをリトライしてください。) ``cache#read_error``" + +#: ../../admin/traffic-server-error-messages.en.rst:165 +msgid "``Connection Timed Out``" +msgstr "``Connection Timed Out``" + +#: ../../admin/traffic-server-error-messages.en.rst:163 +msgid "" +"``504`` Too much time has elapsed since the server has sent data. " +"``timeout#inactivity``" +msgstr "" +"``504`` Too much time has elapsed since the server has sent data. (サーバー" +"がデータを送信してから時間が経過しすぎました。) ``timeout#inactivity``" + +#: ../../admin/traffic-server-error-messages.en.rst:170 +msgid "``Content Length Required``" +msgstr "``Content Length Required``" + +#: ../../admin/traffic-server-error-messages.en.rst:168 +msgid "" +"``400`` Could not process this request because ``Content-Length`` was not " +"specified. ``request#no_content_length``" +msgstr "" +"``400`` Could not process this request because ``Content-Length`` was not " +"specified. (``Content-Length`` が指定されなかったためリクエストを処理できま" +"せんでした。) ``request#no_content_length``" + +#: ../../admin/traffic-server-error-messages.en.rst:175 +msgid "``Cycle Detected``" +msgstr "``Cycle Detected``" + +#: ../../admin/traffic-server-error-messages.en.rst:173 +msgid "" +"``400`` Your request is prohibited because it would cause an HTTP proxy " +"cycle. ``request#cycle_detected``" +msgstr "" +"``400`` Your request is prohibited because it would cause an HTTP proxy " +"cycle. 
(リクエストが HTTP プロキシー循環を引き起こすのでリクエストは禁止され" +"ました。) ``request#cycle_detected``" + +#: ../../admin/traffic-server-error-messages.en.rst:180 +msgid "``Forbidden``" +msgstr "``Forbidden``" + +#: ../../admin/traffic-server-error-messages.en.rst:178 +msgid "" +"``403`` ```` is not an allowed port for SSL connections (you " +"have made a request for a secure SSL connection to a forbidden port " +"number). ``access#ssl_forbidden``" +msgstr "" +"``403`` ```` is not an allowed port for SSL connections (you " +"have made a request for a secure SSL connection to a forbidden port " +"number). ``<ポート番号>`` が SSL 接続で許可されていないポートです (禁じられ" +"たポートに SSL 接続のリクエストを行いました) 。``access#ssl_forbidden``" + +#: ../../admin/traffic-server-error-messages.en.rst:190 +#: ../../admin/traffic-server-error-messages.en.rst:198 +msgid "``Host Header Required``" +msgstr "``Host Header Required``" + +#: ../../admin/traffic-server-error-messages.en.rst:183 +msgid "" +"``400`` An attempt was made to transparently proxy your request, but this " +"attempt failed because your browser did not send an HTTP ``Host`` header. " +"Manually configure your browser to use ``http://:`` " +"as the HTTP proxy. Alternatively, end users can upgrade to a browser that " +"supports the HTTP ``Host`` header field. ``interception#no_host``" +msgstr "" +"``400`` An attempt was made to transparently proxy your request, but this " +"attempt failed because your browser did not send an HTTP ``Host`` header. " +"Manually configure your browser to use ``http://:`` " +"as the HTTP proxy. Alternatively, end users can upgrade to a browser that " +"supports the HTTP ``Host`` header field. 
(リクエストを透過的にプロキシーする" +"よう試みましたが、ブラウザが HTTP ``Host`` ヘッダーを送信しなかったため失敗" +"しました。ブラウザが ``http://<プロキシー名>:<プロキシーポート>`` を HTTP プ" +"ロキシーとして使うように手動で設定してください。もしくは、エンドユーザー" +"が ``Host`` ヘッダーをサポートするブラウザーにアップグレードすることもできま" +"す。) ``interception#no_host``" + +#: ../../admin/traffic-server-error-messages.en.rst:193 +msgid "" +"``400`` Because your browser did not send a ``Host`` HTTP header field, the " +"virtual host being requested could not be determined. To access the website " +"correctly, you must upgrade to a browser that supports the HTTP ``Host`` " +"header field. ``request#no_host``" +msgstr "" +"``400`` Because your browser did not send a ``Host`` HTTP header field, the " +"virtual host being requested could not be determined. To access the website " +"correctly, you must upgrade to a browser that supports the HTTP ``Host`` " +"header field. (ブラウザーが ``Host`` HTTP ヘッダーを送信しなかったので、リク" +"エストされているバーチャルホストを決定できませんでした。Webサイトに正しくア" +"クセスするためには HTTP ``Host`` ヘッダーをサポートしているブラウザーにアッ" +"プグレードしなくてはなりません。) ``request#no_host``" + +#: ../../admin/traffic-server-error-messages.en.rst:204 +msgid "``HTTP Version Not Supported``" +msgstr "``HTTP Version Not Supported``" + +#: ../../admin/traffic-server-error-messages.en.rst:201 +msgid "" +"``505`` The origin server ```` is using an unsupported version " +"of the HTTP protocol. ``response#bad_version``" +msgstr "" +"``505`` The origin server ```` is using an unsupported version " +"of the HTTP protocol. (オリジンサーバー ``<サーバー名>`` はサポートされてい" +"ないバージョンの HTTP プロトコルを使用しています。) ``response#bad_version``" + +#: ../../admin/traffic-server-error-messages.en.rst:209 +msgid "``Invalid HTTP Request``" +msgstr "``Invalid HTTP Request``" + +#: ../../admin/traffic-server-error-messages.en.rst:207 +msgid "" +"``400`` Could not process this ```` HTTP method request for " +"``URL``. ``request#syntax_error``" +msgstr "" +"``400`` Could not process this ```` HTTP method request for " +"``URL``. 
(``URL`` への ``クライアントがリクエストしたメソッド`` HTTP メソッ" +"ドを処理できませんでした。) ``request#syntax_error``" + +#: ../../admin/traffic-server-error-messages.en.rst:214 +msgid "``Invalid HTTP Response``" +msgstr "``Invalid HTTP Response``" + +#: ../../admin/traffic-server-error-messages.en.rst:212 +#: ../../admin/traffic-server-error-messages.en.rst:217 +#: ../../admin/traffic-server-error-messages.en.rst:222 +#: ../../admin/traffic-server-error-messages.en.rst:232 +msgid "" +"``502`` The host ```` did not return the document ``URL`` " +"correctly. ``response#bad_response``" +msgstr "" +"``502`` The host ```` did not return the document ``URL`` " +"correctly. (``<サーバー名>`` というホストは ``URL`` のドキュメントを正しく返" +"しませんでした。) ``response#bad_response``" + +#: ../../admin/traffic-server-error-messages.en.rst:219 +msgid "``Malformed Server Response``" +msgstr "``Malformed Server Response``" + +#: ../../admin/traffic-server-error-messages.en.rst:224 +msgid "``Malformed Server Response Status``" +msgstr "``Malformed Server Response Status``" + +#: ../../admin/traffic-server-error-messages.en.rst:229 +msgid "``Maximum Transaction Time exceeded``" +msgstr "``Maximum Transaction Time exceeded``" + +#: ../../admin/traffic-server-error-messages.en.rst:227 +msgid "" +"``504`` Too much time has elapsed while transmitting document ``URL``. " +"``timeout#activity``" +msgstr "" +"``504`` Too much time has elapsed while transmitting document ``URL``. " +"(``URL`` のドキュメントの転送に時間が経過し過ぎました。) " +"``timeout#activity``" + +#: ../../admin/traffic-server-error-messages.en.rst:234 +msgid "``No Response Header From Server``" +msgstr "``No Response Header From Server``" + +#: ../../admin/traffic-server-error-messages.en.rst:240 +msgid "``Not Cached``" +msgstr "``Not Cached``" + +#: ../../admin/traffic-server-error-messages.en.rst:237 +msgid "" +"``504`` This document was not available in the cache, and you (the client) " +"only accept cached copies. 
``cache#not_in_cache``" +msgstr "" +"``504`` This document was not available in the cache, and you (the client) " +"only accept cached copies. (このドキュメントはキャッシュ内で有効ではなく、あ" +"なた (クライアント) はキャッシュのコピーのみを受け入れます。) " +"``cache#not_in_cache``" + +#: ../../admin/traffic-server-error-messages.en.rst:246 +msgid "``Not Found on Accelerator``" +msgstr "``Not Found on Accelerator``" + +#: ../../admin/traffic-server-error-messages.en.rst:243 +msgid "" +"``404`` The request for ``URL`` on host ```` was not found. " +"Check the location and try again. ``urlrouting#no_mapping``" +msgstr "" +"``404`` The request for ``URL`` on host ```` was not found. " +"Check the location and try again. (``<サーバ名>`` 上の ``URL`` へのリクエス" +"トは見つかりませんでした。位置を確認してもう一度試してください。) " +"``urlrouting#no_mapping``" + +#: ../../admin/traffic-server-error-messages.en.rst:251 +msgid "``NULL``" +msgstr "``NULL``" + +#: ../../admin/traffic-server-error-messages.en.rst:249 +#: ../../admin/traffic-server-error-messages.en.rst:279 +msgid "" +"``502`` The host ```` did not return the document ``URL`` " +"correctly. ``response#bad_response``" +msgstr "" +"``502`` The host ```` did not return the document ``URL`` " +"correctly. (``<ホスト名>`` は ``URL`` のドキュメントを正しく返しませんでし" +"た。) ``response#bad_response``" + +#: ../../admin/traffic-server-error-messages.en.rst:256 +msgid "``Proxy Authentication Required``" +msgstr "``Proxy Authentication Required``" + +#: ../../admin/traffic-server-error-messages.en.rst:254 +msgid "" +"``407`` Please log in with username and password. " +"``access#proxy_auth_required``" +msgstr "" +"``407`` Please log in with username and password. (ユーザー名とパスワードで" +"ログインしてください。) ``access#proxy_auth_required``" + +#: ../../admin/traffic-server-error-messages.en.rst:261 +msgid "``Server Hangup``" +msgstr "``Server Hangup``" + +#: ../../admin/traffic-server-error-messages.en.rst:259 +msgid "" +"``502`` The server ```` closed the connection before the " +"transaction was completed. 
``connect#hangup``" +msgstr "" +"``502`` The server ```` closed the connection before the " +"transaction was completed. (サーバー ``<ホスト名>`` はトランザクションが完了" +"する前に接続を閉じました。) ``connect#hangup``" + +#: ../../admin/traffic-server-error-messages.en.rst:266 +msgid "``Temporarily Moved``" +msgstr "``Temporarily Moved``" + +#: ../../admin/traffic-server-error-messages.en.rst:264 +msgid "" +"``302`` The document you requested, ``URL``, has moved to a new location. " +"The new location is ````. ``redirect#moved_temporarily``" +msgstr "" +"``302`` The document you requested, ``URL``, has moved to a new location. " +"The new location is ````. (リクエストしたドキュメント ``URL`` は新" +"しい場所に移動しました。新しい場所は ``<新しいURL>`` です。) " +"``redirect#moved_temporarily``" + +#: ../../admin/traffic-server-error-messages.en.rst:271 +msgid "``Transcoding Not Available``" +msgstr "``Transcoding Not Available``" + +#: ../../admin/traffic-server-error-messages.en.rst:269 +msgid "" +"``406`` Unable to provide the document ``URL`` in the format requested by " +"your browser. ``transcoding#unsupported``" +msgstr "" +"``406`` Unable to provide the document ``URL`` in the format requested by " +"your browser. (``URL`` のドキュメントをブラウザーが要求した形式で提供するこ" +"とができませんでした。) ``transcoding#unsupported``" + +#: ../../admin/traffic-server-error-messages.en.rst:276 +msgid "``Tunnel Connection Failed``" +msgstr "``Tunnel Connection Failed``" + +#: ../../admin/traffic-server-error-messages.en.rst:274 +msgid "" +"``502`` Could not connect to the server ````. " +"``connect#failed_connect``" +msgstr "" +"``502`` Could not connect to the server ````. 
(サーバー ``<ホスト" +"名>`` に接続できませんでした。) ``connect#failed_connect``" + +#: ../../admin/traffic-server-error-messages.en.rst:281 +msgid "``Unknown Error``" +msgstr "``Unknown Error``" + +#: ../../admin/traffic-server-error-messages.en.rst:289 +msgid "``Unknown Host``" +msgstr "``Unknown Host``" + +#: ../../admin/traffic-server-error-messages.en.rst:284 +msgid "" +"``500`` Unable to locate the server named ````; the server does " +"not have a DNS entry. Perhaps there is a misspelling in the server name or " +"the server no longer exists; double-check the name and try again. " +"``connect#dns_failed``" +msgstr "" +"``500`` Unable to locate the server named ````; the server does " +"not have a DNS entry. Perhaps there is a misspelling in the server name or " +"the server no longer exists; double-check the name and try again. (``<ホスト" +"名>`` と名付けられたサーバーを見つけられませんでした。サーバーは DNS エント" +"リーを持っていません。おそらくサーバー名のスペルミスもしくはサーバーがもう存" +"在していません。名前をダブルチェックしてもう一度試してください。) " +"``connect#dns_failed``" + +#: ../../admin/traffic-server-error-messages.en.rst:296 +msgid "``Unsupported URL Scheme``" +msgstr "``Unsupported URL Scheme``" + +#: ../../admin/traffic-server-error-messages.en.rst:292 +msgid "" +"``400`` Cannot perform your request for the document ``URL`` because the " +"protocol scheme is unknown. ``request#scheme_unsupported``" +msgstr "" +"``400`` Cannot perform your request for the document ``URL`` because the " +"protocol scheme is unknown. (プロトコルスキームが不明なため、ドキュメント " +"``URL`` へのリクエストを行えません。) ``request#scheme_unsupported``" + +#: ../../admin/traffic-server-error-messages.en.rst:301 +msgid "Standard HTTP Response Messages" +msgstr "標準的な HTTP レスポンスメッセージ" + +#: ../../admin/traffic-server-error-messages.en.rst:303 +msgid "" +"The following standard HTTP response messages are provided for your " +"information." 
+msgstr "次の標準的な HTTP レスポンスメッセージが参考情報として提供されます。" + +#: ../../admin/traffic-server-error-messages.en.rst:307 +msgid "``200``" +msgstr "``200``" + +#: ../../admin/traffic-server-error-messages.en.rst:307 +msgid "OK" +msgstr "OK" + +#: ../../admin/traffic-server-error-messages.en.rst:310 +msgid "``202``" +msgstr "``202``" + +#: ../../admin/traffic-server-error-messages.en.rst:310 +msgid "Accepted" +msgstr "Accepted" + +#: ../../admin/traffic-server-error-messages.en.rst:313 +msgid "``204``" +msgstr "``204``" + +#: ../../admin/traffic-server-error-messages.en.rst:313 +msgid "No Content" +msgstr "No Content" + +#: ../../admin/traffic-server-error-messages.en.rst:316 +msgid "``206``" +msgstr "``206``" + +#: ../../admin/traffic-server-error-messages.en.rst:316 +msgid "Partial Content" +msgstr "Partial Content" + +#: ../../admin/traffic-server-error-messages.en.rst:319 +msgid "``300``" +msgstr "``300``" + +#: ../../admin/traffic-server-error-messages.en.rst:319 +msgid "Multiple Choices" +msgstr "Multiple Choices" + +#: ../../admin/traffic-server-error-messages.en.rst:322 +msgid "``301``" +msgstr "``301``" + +#: ../../admin/traffic-server-error-messages.en.rst:322 +msgid "Moved Permanently" +msgstr "Moved Permanently" + +#: ../../admin/traffic-server-error-messages.en.rst:325 +msgid "``302``" +msgstr "``302``" + +#: ../../admin/traffic-server-error-messages.en.rst:325 +msgid "Found" +msgstr "Found" + +#: ../../admin/traffic-server-error-messages.en.rst:328 +msgid "``303``" +msgstr "``303``" + +#: ../../admin/traffic-server-error-messages.en.rst:328 +msgid "See Other" +msgstr "See Other" + +#: ../../admin/traffic-server-error-messages.en.rst:331 +msgid "``304``" +msgstr "``304``" + +#: ../../admin/traffic-server-error-messages.en.rst:331 +msgid "Not Modified" +msgstr "Not Modified" + +#: ../../admin/traffic-server-error-messages.en.rst:334 +msgid "``400``" +msgstr "``400``" + +#: ../../admin/traffic-server-error-messages.en.rst:334 +msgid "Bad Request" +msgstr "Bad Request" + 
+#: ../../admin/traffic-server-error-messages.en.rst:337 +msgid "``401``" +msgstr "``401``" + +#: ../../admin/traffic-server-error-messages.en.rst:337 +msgid "Unauthorized; retry" +msgstr "Unauthorized; retry" + +#: ../../admin/traffic-server-error-messages.en.rst:340 +msgid "``403``" +msgstr "``403``" + +#: ../../admin/traffic-server-error-messages.en.rst:340 +msgid "Forbidden" +msgstr "Forbidden" + +#: ../../admin/traffic-server-error-messages.en.rst:343 +msgid "``404``" +msgstr "``404``" + +#: ../../admin/traffic-server-error-messages.en.rst:343 +msgid "Not Found" +msgstr "Not Found" + +#: ../../admin/traffic-server-error-messages.en.rst:346 +msgid "``405``" +msgstr "``405``" + +#: ../../admin/traffic-server-error-messages.en.rst:346 +msgid "Method Not Allowed" +msgstr "Method Not Allowed" + +#: ../../admin/traffic-server-error-messages.en.rst:349 +msgid "``406``" +msgstr "``406``" + +#: ../../admin/traffic-server-error-messages.en.rst:349 +msgid "Not acceptable" +msgstr "Not acceptable" + +#: ../../admin/traffic-server-error-messages.en.rst:352 +msgid "``408``" +msgstr "``408``" + +#: ../../admin/traffic-server-error-messages.en.rst:352 +msgid "Request Timeout" +msgstr "Request Timeout" + +#: ../../admin/traffic-server-error-messages.en.rst:355 +msgid "``500``" +msgstr "``500``" + +#: ../../admin/traffic-server-error-messages.en.rst:355 +msgid "Internal server error" +msgstr "Internal server error" + +#: ../../admin/traffic-server-error-messages.en.rst:358 +msgid "``501``" +msgstr "``501``" + +#: ../../admin/traffic-server-error-messages.en.rst:358 +msgid "Not Implemented" +msgstr "Not Implemented" + +#: ../../admin/traffic-server-error-messages.en.rst:361 +msgid "``502``" +msgstr "``502``" + +#: ../../admin/traffic-server-error-messages.en.rst:361 +msgid "Bad Gateway" +msgstr "Bad Gateway" + +#: ../../admin/traffic-server-error-messages.en.rst:364 +msgid "``504``" +msgstr "``504``" + +#: ../../admin/traffic-server-error-messages.en.rst:364 +msgid "Gateway 
Timeout" +msgstr "Gateway Timeout" diff --git a/doc/locale/ja/LC_MESSAGES/admin/transparent-proxy.en.po b/doc/locale/ja/LC_MESSAGES/admin/transparent-proxy.en.po new file mode 100644 index 00000000..159ebc98 --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/admin/transparent-proxy.en.po @@ -0,0 +1,218 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.# + +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-11-08 17:57+0900\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../admin/transparent-proxy.en.rst:4 +msgid "Transparent Proxying" +msgstr "透過プロキシー" + +#: ../../admin/transparent-proxy.en.rst:32 +msgid "" +"Transparent Proxying is the ability of a proxy (such as ATS) to intercept " +"connections between clients and servers without being visible." 
+msgstr "" +"透過プロキシーは (ATS などの ) プロキシーがクライアントとサーバーの間の接続" +"を見えないように横取りする能力です。" + +#: ../../admin/transparent-proxy.en.rst:35 +msgid "" +"The general network structure that will be used in this documentation looks " +"like this --" +msgstr "" +"このドキュメント内で使用される全体的なネットワーク構成はこのようになっていま" +"す。" + +#: ../../admin/transparent-proxy.en.rst:42 +msgid "ATS basic traffic flow of Transparent Proxy" +msgstr "透過プロキシーの ATS での基本的なトラフィックの流れ" + +#: ../../admin/transparent-proxy.en.rst:44 +msgid "" +"There must be a gateway device through which passes all network traffic " +"from the client to the Internet (or external cloud). The gateway is " +"responsible for effectively splicing ATS in to selected streams of that " +"traffic. Each traffic stream is split in to two, with ATS terminating both " +"sides. That is, stream green-1, red-2, is split in to the green connection " +"and the red connection. Note that ATS may or may not be on the gateway " +"system, the redirected traffic can flow over other network infrastructure." +msgstr "" +"クライアントからインターネット ( もしくは外部のクラウド ) へのすべてのネット" +"ワークトラフィックを通すゲートウェイデバイスがなくてはなりません。このゲート" +"ウェイは効率的に ATS をそのトラフィックの選択されたストリームに継ぐことに責" +"任をもちます。各トラフィックストリームは二つに分割され、ATS は両側を終端しま" +"す。これは、ストリーム緑-1、赤-2 は緑の接続と赤の接続に分割されるということ" +"です。ATS はゲートウェイ上にあることもそうでないこともあることに注意してくだ" +"さい。向き先の変わったトラフィックは他のネットワーク基盤に流れ出ることがあり" +"ます。" + +#: ../../admin/transparent-proxy.en.rst:53 +msgid "" +"Because ATS uses two connections transparency can be set independently on " +"the client and origin server (Internet / external cloud) side. We will " +"define what is generally called \"transparent proxy\" as two aspects, " +"*inbound transparency* and *outbound transparency*." 
+msgstr "" +"なぜなら ATS はクライアント側とオリジンサーバー ( インターネット / 外部クラ" +"ウド ) の側とで独立して設定される可能性のある二つの接続透過性を使用するから" +"です。二つの面、\\ *内向き透過性*\\ と\\ *外向き透過性*\\ で何が一般的に\"透" +"過プロキシー\"と呼ばれるのかを定義していきます。" + +#: ../../admin/transparent-proxy.en.rst:58 +msgid "" +"Inbound transparency is a proxy that is transparent to connections that are " +"inbound to the proxy, i.e. a connection initiated by a client which " +"connects to the proxy (green-1). Similarly, outbound transparency is a " +"proxy that is transparent to connections that are outbound from the proxy, " +"i.e. a connection initiated by the proxy to an origin server (red-2)." +msgstr "" +"内向き透過性はプロキシーに入ってくる接続に対して透過なプロキシーです。例えば" +"プロキシーに接続するクライアントによって開始される接続 ( 緑-1 ) です。同様" +"に、外向き透過性はプロキシーから出ていく接続に対して透過なプロキシーです。例" +"えば、オリジンサーバーに接続するプロキシーによって開始される接続 ( 赤-2 ) で" +"す。" + +#: ../../admin/transparent-proxy.en.rst:65 +msgid "" +"In most treatments these two types of transparency are treated as unitarily " +"but that is not required. This implementation supports transparency " +"independently on the two (client, origin server) sides (`use cases `_." +msgstr "" +"これら二つのタイプの透過性のほとんどの扱いは一元的なものとして扱われますが、" +"それは必須ではありません。この実装は透過性を二つのサイド ( クライアント、オ" +"リジンサーバー ) で独立してサポートします。( `ユースケース `_)" + +#: ../../admin/transparent-proxy.en.rst:70 +msgid "" +"It is critical to note that any transparency requires specialized routing " +"and cannot be done solely by configuring ATS. 
ATS transparency also " +"requires support from the Linux kernel and therefore currently only works " +"on sufficiently recent Linux kernels that support the following features --" +msgstr "" +"どの透過性も特別なルーティングを必要とし、単に ATS を設定するだけでは完了で" +"きないことに十分に注意してください。ATS の透過性は Linux カーネルのサポート" +"も必要とするので、現在は次の機能をサポートした最近の Linux カーネルでしか十" +"分に動作しません。" + +#: ../../admin/transparent-proxy.en.rst:76 +msgid "TPROXY" +msgstr "TPROXY" + +#: ../../admin/transparent-proxy.en.rst:77 +msgid "POSIX capabilities" +msgstr "POSIX ケーパビリティ" + +#: ../../admin/transparent-proxy.en.rst:79 +msgid "" +"In addition the specialized routing will require using ``iptables`` and in " +"some cases ``ebtables``." +msgstr "" +"また特殊なルーティングは ``iptables`` を必要とし、いくつかのケースでは " +"``ebtables`` を必要とします。" + +#: ../../admin/transparent-proxy.en.rst:82 +msgid "" +"Standard build procedures should work for transparency support but if not " +"consult these :ref:`more detailed instructions `" +msgstr "" +"標準のビルド手順は透過プロキシーの対応でも使えるはずですが、うまくいかない場" +"合は\\ :ref:`より詳細な手順 `\\ を参考にして" +"ください。" + +#: ../../admin/transparent-proxy.en.rst:85 +msgid "" +"Transparency is configured per server port not globally. This is done via " +"the configuration values :ts:cv:`proxy.config.http.server_ports`. In " +"addition, :ts:cv:`proxy.config.reverse_proxy.enabled` must be enabled if " +"the client side is transparent. That should be fixed in a future patch." 
+msgstr "" +"透過性はグローバルにではなくサーバーのポート毎に設定されます。これは :ts:cv:" +"`proxy.config.http.server_ports` の設定値によって行われます。また、クライア" +"ント側が透過の場合は :ts:cv:`proxy.config.reverse_proxy.enabled` が有効化さ" +"れていなければなりません。これは将来のパッチで修正されます。" + +#: ../../admin/transparent-proxy.en.rst:90 +msgid "" +"In the first case use the attribute character (replacing the default 'X')" +msgstr "" +"最初のケースは ( デフォルトの 'X' を置き換えている) 属性文字を使用します" + +#: ../../admin/transparent-proxy.en.rst:93 +msgid "**Attribute** **Transparency Style** **Reverse Proxy**" +msgstr "**属性** **透過性スタイル** **リバースプロキシー**" + +#: ../../admin/transparent-proxy.en.rst:96 +msgid "``=``" +msgstr "``=``" + +#: ../../admin/transparent-proxy.en.rst:96 +msgid "Full transparency: either" +msgstr "完全透過性: どちらか" + +#: ../../admin/transparent-proxy.en.rst:99 +msgid "``>``" +msgstr "``>``" + +#: ../../admin/transparent-proxy.en.rst:99 +msgid "Inbound (client) transparency: enabled" +msgstr "内向き ( クライアント ) 透過性: 有効" + +#: ../../admin/transparent-proxy.en.rst:102 +msgid "``<``" +msgstr "``<``" + +#: ../../admin/transparent-proxy.en.rst:102 +msgid "Outbound (origin server) transparency: either" +msgstr "外向き ( オリジンサーバー ) 透過性: どちらか" + +#: ../../admin/transparent-proxy.en.rst:104 +msgid "" +"In the outbound transparent case clients must connect directly to ATS " +"either through an explicit proxy mechanism or by advertising the IP address " +"of the ATS server via DNS as the origin server address." 
+msgstr "" +"外向き透過性のケースではクライアントは明示的プロキシーの仕組みを通してか " +"ATS サーバーの IP アドレスをオリジンサーバーのアドレスとして DNS で広告する" +"かのどちらかで ATS に直接接続しなくてはなりません。" + +#: ../../admin/transparent-proxy.en.rst:108 +msgid "Some tested scenarios --" +msgstr "いくつかの実験済みのシナリオ" + +#: ../../admin/transparent-proxy.en.rst:110 +msgid ":doc:`transparent-proxy/bridge.en`" +msgstr ":doc:`transparent-proxy/bridge.en`" + +#: ../../admin/transparent-proxy.en.rst:111 +msgid ":doc:`transparent-proxy/router-inline.en`" +msgstr ":doc:`transparent-proxy/router-inline.en`" + +#: ../../admin/transparent-proxy.en.rst:112 +msgid ":doc:`transparent-proxy/wccp-configuration.en`" +msgstr ":doc:`transparent-proxy/wccp-configuration.en`" diff --git a/doc/locale/ja/LC_MESSAGES/admin/transparent-proxy/bridge.en.po b/doc/locale/ja/LC_MESSAGES/admin/transparent-proxy/bridge.en.po new file mode 100644 index 00000000..06faa2e6 --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/admin/transparent-proxy/bridge.en.po @@ -0,0 +1,298 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-11-08 17:57+0900\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../admin/transparent-proxy/bridge.en.rst:2 +msgid "Inline on a Linux Bridge" +msgstr "Linux ブリッジ上でのインライン" + +#: ../../admin/transparent-proxy/bridge.en.rst:23 +msgid "" +"A Linux can be configured to operate in `bridge mode `_. Two or " +"more physical interfaces are assigned to the bridge. A single IP address is " +"shared across the interfaces. By default any packet that arrives on one " +"interface is immediately routed out another bridge interface." +msgstr "" +"Linux は\\ `ブリッジモード `_\\ で動作するように設定できます。二つ以上の物" +"理インターフェースがブリッジに割り当てられます。一つの IP アドレスはそれらの" +"インターフェース間で共有されます。デフォルトでは一つのインターフェースに到着" +"したあらゆるパケットは直ちにもう一つのブリッジインターフェースに送り出されま" +"す。" + +#: ../../admin/transparent-proxy/bridge.en.rst:29 +msgid "Linux packages required:" +msgstr "必要な Linux のパッケージ" + +#: ../../admin/transparent-proxy/bridge.en.rst:31 +msgid "bridge-utils" +msgstr "bridge-utils" + +#: ../../admin/transparent-proxy/bridge.en.rst:32 +msgid "ebtables" +msgstr "ebtables" + +#: ../../admin/transparent-proxy/bridge.en.rst:34 +msgid "" +"In our example of setting up bridge mode we will use a local address of " +"192.168.1.11/24 and interfaces ``eth0`` and ``eth1`` as the bridge " +"interfaces (more detailed documentation is available `here `_). You may omit the " +"'#' character and everything after it. 
::" +msgstr "" +"ブリッジモードの設定例では 192.168.1.11/24 のローカルアドレスとインター" +"フェース ``eth0`` と ``eth1`` をブリッジインターフェースとして使用します " +"( より詳細なドキュメントは `ここ `_ にあります) 。'#' とその後ろのものはすべ" +"て省略できます。 ::" + +#: ../../admin/transparent-proxy/bridge.en.rst:49 +msgid "" +"If you have not already done so, remember to add a default route, such as " +"this one for a gateway of 192.168.1.1. ::" +msgstr "" +"もしまだ済ませていない場合は、これが 192.168.1.1 のゲートウェイのためのもの" +"であると、デフォルト経路を追加することを覚えておいてください。" + +#: ../../admin/transparent-proxy/bridge.en.rst:54 +msgid "" +"At this point it is a good idea to test connectivity to verify the basic " +"bridge is functional." +msgstr "" +"この時点で基本的なブリッジが動作することを確認するために接続性をテストしてお" +"くと良いです。" + +#: ../../admin/transparent-proxy/bridge.en.rst:57 +msgid "" +"Once the bridge is verified to work, this is the basic traffic pattern of " +"interest." +msgstr "" +"ブリッジが動作することを確認できたなら、これがお望みの基本的なトラフィックパ" +"ターンです。" + +#: ../../admin/transparent-proxy/bridge.en.rst:64 +msgid "Picture of traffic flow through a bridge with ATS" +msgstr "ATS を使ったブリッジを通るトラフィックの流れ" + +#: ../../admin/transparent-proxy/bridge.en.rst:66 +msgid "" +"The green arrows are packets originating from the client and the red arrows " +"are packets originating from the origin server. All traffic not directed to " +"the local address will pass through the bridge. We need to break into some " +"of the traffic and subject it to routing so that it can be routed to ATS. " +"This requires ``ebtables``. The flows we want to intercept are green 1 " +"(from client to bridge) and red 1 (origin server to bridge)." 
+msgstr "" +"緑の矢印はクライアントからのパケットで、赤の矢印はオリジンサーバーからのパ" +"ケットです。ローカルアドレス宛てではないすべてのトラフィックはブリッジを通り" +"ます。トラフィックに割って入り ATS に向かうようにする必要があります。これに" +"は ``ebtables`` が必要となります。横取りしたい流れは緑 1 ( クライアントから" +"ブリッジ ) と赤 1 ( オリジンサーバーからブリッジ ) です。" + +#: ../../admin/transparent-proxy/bridge.en.rst:94 +msgid "" +"Traffic Server operates at layer 3 so we need to use ``iptables`` to handle " +"IP packets appropriately.::" +msgstr "" +"Traffic Server はレイヤー 3 で動作するので IP パケットを適切に扱うために " +"``iptables`` を使用する必要があります。 ::" + +#: ../../admin/transparent-proxy/bridge.en.rst:102 +msgid "" +"At this point the directionality of the interfaces matters. For the example " +"``eth1`` is the inbound (client side) interface, while ``eth0`` is the " +"outbound (origin server side) interface. We mark both flows of packets so " +"that we can use policy routing on them. For inbound packets we need to use " +"``TPROXY`` to force acceptance of packets to foreign IP addresses. For " +"returning outbound packets there will be a socket open bound to the foreign " +"address, we need only force it to be delivered locally. The value for ``--" +"on-ip`` is 0 because the target port is listening and not bound to a " +"specific address. The value for ``--on-port`` must match the Traffic Server " +"server port. Otherwise its value is arbitrary. ``--dport`` and ``--sport`` " +"specify the port from the point of view of the clients and origin servers." 
+msgstr "" +"ここでインターフェースの方向性が問題となります。例では ``eth1`` は内向き " +"( クライアント側 ) インターフェース、``eth0`` は外向き ( オリジンサーバー" +"側 ) インターフェースです。両方のパケットの流れでポリシーに沿ったルーティン" +"グができるように印を付けます。内向きのパケットでは外部の IP アドレスへのパ" +"ケットの受け付けを強制するために ``TPROXY`` を使う必要があります。外向きのパ" +"ケットを返すためには外部のアドレスに結びつけられているソケットが開かれてお" +"り、それを内部的に配達されることのみを強制する必要があります。ターゲットポー" +"トは listen 中で特定のアドレスには bind されていないので ``--on-ip`` の値は " +"0 です。``--on-port`` の値は Traffic Server のサーバーポートと一致しなければ" +"なりません。そうでなければその値は任意です。``--dport`` と ``--sport`` はク" +"ライアントとオリジンサーバーの視点でのポートを指定します。" + +#: ../../admin/transparent-proxy/bridge.en.rst:115 +msgid "" +"Once the flows are marked we can force them to be delivered locally via the " +"loopback interface via a policy routing table.::" +msgstr "" +"流れに印が付けられたのでそれらがポリシーに沿ったルーティングテーブルにより" +"ループバックインターフェースで内部的に配達されることを強制できます。" + +#: ../../admin/transparent-proxy/bridge.en.rst:121 +msgid "" +"The marking used is arbitrary but it must be consistent between " +"``iptables`` and the routing rule. The table number must be in the range " +"1..253." +msgstr "" +"使用済みと印を付けることは任意ですが、``iptables`` とルーティングルールとの" +"間で矛盾がないようにしなければなりません。テーブル番号は 1 から 253 の間でな" +"ければなりません。" + +#: ../../admin/transparent-proxy/bridge.en.rst:125 +msgid "" +"To configure Traffic Server set the following values in :file:`records." 
+"config`" +msgstr "" +"Traffic Server を設定するために次の :file:`records.config` の値を設定しま" +"す。" + +#: ../../admin/transparent-proxy/bridge.en.rst:128 +msgid "" +":ts:cv:`proxy.config.http.server_ports` *value from* ``--on-port`` (see " +"below)" +msgstr "" +":ts:cv:`proxy.config.http.server_ports` *の値は* ``--on-port`` (以下を参照)" + +#: ../../admin/transparent-proxy/bridge.en.rst:130 +msgid ":ts:cv:`proxy.config.reverse_proxy.enabled` ``1``" +msgstr ":ts:cv:`proxy.config.reverse_proxy.enabled` ``1``" + +#: ../../admin/transparent-proxy/bridge.en.rst:132 +msgid ":ts:cv:`proxy.config.url_remap.remap_required` ``0``" +msgstr ":ts:cv:`proxy.config.url_remap.remap_required` ``0``" + +#: ../../admin/transparent-proxy/bridge.en.rst:134 +msgid "" +"You may also need to set :ts:cv:`proxy.config.cluster.ethernet_interface` " +"to \"br0\" (the name of the bridge interface from the ``_)." +msgstr "" +":ts:cv:`proxy.config.cluster.ethernet_interface` を \"br0\" に設定する必要も" +"あります。( ブリッジインターフェースの名前は ``_ からとって" +"ください。)" + +#: ../../admin/transparent-proxy/bridge.en.rst:138 +msgid "Additional troubleshooting" +msgstr "追加のトラブルシューティング" + +#: ../../admin/transparent-proxy/bridge.en.rst:143 +msgid "" +"It is frequently the case that the default tables prevent incoming HTTP. " +"You can clear all filters with the commands::" +msgstr "" +"デフォルトテーブルが入ってくる HTTP を妨害するケースがよくあります。次のコマ" +"ンドですべてのフィルターを消去することができます。" + +#: ../../admin/transparent-proxy/bridge.en.rst:149 +msgid "" +"That is a bit drastic and should only be used for testing / debugging. A " +"live system will likely need some filters in place but that is beyond the " +"scope of this document. If this fixes the problem, then your filter set is " +"too restrictive." 
+msgstr "" +"これは劇的でテスト / デバッグのためだけに使用すべきです。稼働中のシステムは" +"ここにいくつかのフィルターを必要としますが、それはこのドキュメントの範囲外で" +"す。もしこれが問題を解決するのであれば、フィルターの制限が厳しすぎます。" + +#: ../../admin/transparent-proxy/bridge.en.rst:154 +msgid "" +"Note that this problem will prevent the basic bridge (without ATS) from " +"allowing HTTP traffic through." +msgstr "" +"この問題は HTTP のトラフィックを通過させる ( ATS を使わない ) 基本的なブリッ" +"ジ動作を妨げていることに注意してください。" + +#: ../../admin/transparent-proxy/bridge.en.rst:173 +msgid "Footnotes" +msgstr "脚注" + +#: ../../admin/transparent-proxy/bridge.en.rst:175 +msgid "" +"The ``--redirect-target`` can be omitted, but then the ``iptables`` rules " +"would need to use ``--physdev`` instead of just ``-i``. The actual packet " +"processing is identical." +msgstr "" +"``--redirect-target`` は省略可能ですが、``iptables`` のルールで単に ``-i`` " +"とする代わりに ``--physdev`` を使う必要があるでしょう。実際のパケット処理は" +"まったく同じです。" + +#: ../../admin/transparent-proxy/bridge.en.rst:74 +msgid "" +"In this example we will intercept port 80 (HTTP) traffic. We will use the " +"``BROUTING`` chain because it is traversed only for packets that originated " +"externally and arrived on a (forwarding enabled) interface. Although it " +"looks like this will intercept all port 80 traffic it will only affect the " +"two flows described above. ``-j redirect`` marks the packet as being " +"diverted to the bridge and not forwarded, and the ``DROP`` target puts the " +"packets in the normal ``iptables`` routing so that we can use standard " +"device tests on them [1]_. Although this example handles only port 80, " +"other ports are the same except for the port value. Note also the port here " +"is the port from the point of view of the clients and origin servers, not " +"the Traffic Server server port. 
::" +msgstr "" +"この例ではポート 80 (HTTP) のトラフィックを横取りします。外部から発信され " +"( 転送が有効化された ) インターフェースに到着したパケットのみを扱いたいので " +"``BROUTING`` チェーンを使用します。ポート 80 のすべてのトラフィックを横取り" +"するように見えますが、上記の流れのみに影響を与えます。``-j redirect`` はパ" +"ケットがブリッジに流れ、転送はされないように印を付け、``DROP`` ターゲットは" +"パケットを標準的なデバイスのテストができるように通常の ``iptables`` のルー" +"ティングに置きます。 [1]_ この例はポート 80 のみを扱いますが、他のポートも" +"ポート番号が違うことを除いて同じです。ここでのポートはクライアントとオリジン" +"サーバーの視点でのものであり、Traffic Server のサーバーポートではないことに" +"注意してください。" + +#: ../../admin/transparent-proxy/bridge.en.rst:140 +msgid "" +"Check to make sure that ``iptables`` is not filtering (blocking) incoming " +"HTTP connections." +msgstr "" +"``iptables`` が入ってくる HTTP 接続をフィルタ ( ブロック ) していないことを" +"確認してください。" + +#: ../../admin/transparent-proxy/bridge.en.rst:157 +msgid "Verify that IP packet forwarding is enabled." +msgstr "IP パケット転送が有効化されていることを確認してください。" + +#: ../../admin/transparent-proxy/bridge.en.rst:159 +msgid "You can check this with::" +msgstr "これは次のように確認できます。 ::" + +#: ../../admin/transparent-proxy/bridge.en.rst:163 +msgid "" +"The output should be a non-zero value (usually '1'). If it is zero, you can " +"set it with::" +msgstr "" +"出力は非ゼロ値 ( たいてい '1' ) のはずです。もしゼロの場合は次のように設定可" +"能です。" + +#: ../../admin/transparent-proxy/bridge.en.rst:168 +msgid "" +"This can setting can be persisted by putting it in ``/etc/sysctl.conf``: ::" +msgstr "この設定は ``/etc/sysctl.conf`` に書くことで恒久的にできます。" diff --git a/doc/locale/ja/LC_MESSAGES/admin/transparent-proxy/build.en.po b/doc/locale/ja/LC_MESSAGES/admin/transparent-proxy/build.en.po new file mode 100644 index 00000000..a3500417 --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/admin/transparent-proxy/build.en.po @@ -0,0 +1,111 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-11-08 17:57+0900\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../admin/transparent-proxy/build.en.rst:4 +msgid "Building ATS for transparency" +msgstr "透過用 ATS のビルド" + +#: ../../admin/transparent-proxy/build.en.rst:24 +msgid "" +"In most cases, if your environment supports transparency then ``configure`` " +"will automatically enable it. For other environments you may need to " +"twiddle the ``configure`` options." +msgstr "" +"ほとんどのケースでは、環境が透過性をサポートしていれば ``configure`` が自動" +"的にそれを有効化します。そうでない環境では ``configure`` のオプションをいじ" +"る必要があるでしょう。" + +#: ../../admin/transparent-proxy/build.en.rst:34 +msgid "``--enable-posix-cap``" +msgstr "``--enable-posix-cap``" + +#: ../../admin/transparent-proxy/build.en.rst:29 +msgid "" +"This enables POSIX capabilities, which are required for transparency. These " +"are compiled in by default. To check your system, look for the header file " +"``sys/capability.h`` and the system library ``libcap``. These are in the " +"packages ``libcap`` and ``libcap-devel`` or ``libcap-dev`` (depending on " +"the Distribution) contra-respectively." 
+msgstr "" +"これは透過性で必要とされる POSIX ケーパビリティを有効化します。これらはデ" +"フォルトで自動的にコンパイルされます。システムを確認するために、ヘッダーファ" +"イル ``sys/capability.h`` とシステムライブラリ ``libcap`` を探します。これら" +"はそれぞれ ``libcap`` と ``libcap-devel`` または ``libcap-dev`` ( ディストリ" +"ビューションに依ります ) パッケージ内にあります。" + +#: ../../admin/transparent-proxy/build.en.rst:52 +msgid "``--enable-tproxy[=value]``" +msgstr "``--enable-tproxy[=value]``" + +#: ../../admin/transparent-proxy/build.en.rst:37 +msgid "" +"Enable TPROXY support, which is the Linux kernel feature used for " +"transparency. This should be present in the base installation, there is no " +"package associated with it. \\* ``auto`` Do automatic checks for the the " +"TPROXY header file (``linux/in.h``) and enable TPROXY support if the " +"``IP_TRANSPARENT`` definition is present. This is the default if this " +"option is not specified or ``value`` is omitted. \\* ``no`` Do not check " +"for TPROXY support, disable support for it. \\* ``force`` Do not check for " +"TPROXY support, enable it using the $ats@ built in value for " +"``IP_TRANSPARENT``. This is useful for systems that have it in the kernel " +"for but some reason do not have the appropriate system header file. \\* " +"*number* Do not check for TPROXY support, use *number* as the " +"``IP_TRANSPARENT`` value. There are, at present, no known standard " +"distributions of Linux that support TPROXY but use a value different from " +"the built in ATS default. However, a custom built kernel may do so and in " +"that case the specific value can be specified." 
+msgstr "" +"透過で使用される Linux カーネルの機能である TPROXY サポートを有効化します。" +"これは基本的なインストールに含まれているはずであり、これに紐付くパッケージは" +"ありません。\\* ``auto`` TPROXY ヘッダーファイル (``linux/in.h``) を自動的" +"に確認し、``IP_TRANSPARENT`` の定義が存在する場合は TPROXY サポートを有効化" +"します。これはこのオプションが指定されていないか ``value`` が省略された場合" +"のデフォルトです。\\* ``no`` TPROXY サポートを確認せず、そのサポートを無効化" +"します。\\* ``force`` TPROXY サポートを確認せず、``IP_TRANSPARENT`` のため" +"の $ats@ 組み込みの値を使用して有効化します。これはカーネルにはあるものの何" +"らかの理由で適切なシステムヘッダーファイルを持っていないシステムの場合に便利" +"です。\\* *number* TPROXY サポートを確認せず、 *number* を " +"``IP_TRANSPARENT`` の値として使用します。いまのところ、TPROXY をサポートして" +"いて ATS 組み込みのデフォルトと異なる値を使用することが知られている標準的な" +"ディストリビューションはありません。しかし、カスタムビルドのカーネルはそうし" +"ているかも知れず、その場合には特定の値が指定できます。" + +#: ../../admin/transparent-proxy/build.en.rst:54 +msgid "" +"In the default case, ATS configuration will automatically check for TPROXY " +"support via the presence of the ``linux/in.h`` header file and compile in " +"TPROXY support if it is available. If that fails, you may be able to " +"recover by using one of the options above. Note that transparency may be " +"built in by default but it is not active unless explicitly enabled in the " +"ATS configuration files." +msgstr "" +"デフォルトでは、ATS の設定はヘッダーファイル ``linux/in.h`` の存在により " +"TPROXY の対応を自動的に確認し、それが有効なら TPROXY 対応でコンパイルしま" +"す。これが失敗した場合は、たぶん上記のオプションのどれか一つを使うことで回復" +"可能です。透過性はデフォルトでビルドされることがありますが、ATS の設定ファイ" +"ルで明示的に有効化しない限り動作しないことに注意してください。" diff --git a/doc/locale/ja/LC_MESSAGES/admin/transparent-proxy/router-inline.en.po b/doc/locale/ja/LC_MESSAGES/admin/transparent-proxy/router-inline.en.po new file mode 100644 index 00000000..ef9a6e72 --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/admin/transparent-proxy/router-inline.en.po @@ -0,0 +1,162 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-11-08 17:57+0900\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../admin/transparent-proxy/router-inline.en.rst:2 +msgid "Inline on a Linux router" +msgstr "Linux ルーター上でのインライン" + +#: ../../admin/transparent-proxy/router-inline.en.rst:21 +msgid "" +"The routed set up presumes the set of clients are on distinct networks " +"behind a single physical interface. 
For the purposes of this example will " +"we presume" +msgstr "" +"経路セットアップはクライアントは一つの物理インターフェースの後ろにある別々の" +"ネットワーク上に存在すると仮定します。この例の目的を次のように仮定します。" + +#: ../../admin/transparent-proxy/router-inline.en.rst:25 +msgid "The clients are on network 172.28.56.0/24" +msgstr "クライアントは 172.28.56.0/24 のネットワーク上にあります" + +#: ../../admin/transparent-proxy/router-inline.en.rst:26 +msgid "The router connects the networks 172.28.56.0/24 and 192.168.1.0/24" +msgstr "" +"ルーターは 172.28.56.0/24 と 192.168.1.0/24 のネットワークを接続しています。" + +#: ../../admin/transparent-proxy/router-inline.en.rst:27 +msgid "Interface ``eth0`` is on the network 192.168.1.0/24" +msgstr "" +"インターフェース ``eth0`` は 192.168.1.0/24 のネットワーク上にあります。" + +#: ../../admin/transparent-proxy/router-inline.en.rst:28 +msgid "Interface ``eth1`` is on the network 172.28.56.0/24" +msgstr "" +"インターフェース ``eth1`` は 172.28.56.0/24 のネットワーク上にあります。" + +#: ../../admin/transparent-proxy/router-inline.en.rst:29 +msgid "" +"The router is already configured to route traffic correctly for the clients." +msgstr "" +"このルーターはクライアントのためにトラフィックを正しく転送するようにすでに設" +"定済みです。" + +#: ../../admin/transparent-proxy/router-inline.en.rst:32 +msgid "" +"In this example we will intercept port 80 (HTTP) traffic that traverses the " +"router. The first step is to use ``iptables`` to handle IP packets " +"appropriately." +msgstr "" +"この例ではルーターを通るポート 80 (HTTP) のトラフィックを横取りします。最初" +"のステップは IP パケットを適切に扱うために ``iptables`` を使うことです。" + +#: ../../admin/transparent-proxy/router-inline.en.rst:47 +msgid "" +"We mark packets so that we can use policy routing on them. For inbound " +"packets we use ``TPROXY`` to make it possible to accept packets sent to " +"foreign IP addresses. For returning outbound packets there will be a socket " +"open bound to the foreign address, we need only force it to be delivered " +"locally. The value for ``--on-ip`` is 0 because the target port is " +"listening and not bound to a specific address. 
The value for ``--on-port`` " +"must match the Traffic Server server port. Otherwise its value is " +"arbitrary. ``--dport`` and ``--sport`` specify the port from the point of " +"view of the clients and origin servers. The middle two lines exempt local " +"web traffic from being marked for Traffic Server -- these rules can be " +"tightened or loosened as needed. They server by matching traffic and " +"exiting the ``iptables`` processing via ``ACCEPT`` before the last line is " +"checked." +msgstr "" +"これらについてポリシールーティングができるようにパケットに印を付けます。内向" +"きのパケットでは外部の IP アドレスに送信されるパケットを受け付けるために " +"``TPROXY`` を使用します。外向きのパケットを返すために外部アドレスに bind さ" +"れたソケットがあり、内部的に配送されることのみを強制します。``--on-ip`` の値" +"は目的のポートは特定のアドレスに bind されておらず listen 状態なので 0 で" +"す。``--on-port`` の値は Traffic Server のサーバーポートと一致しなければ成り" +"ませんが、その他の点では任意の値を使用できます。 ``--dport`` と ``--sport`` " +"はクライアントとオリジンサーバーの視点でのポートを指定します。真ん中の二行は" +"印の付けられた Traffic Server へのローカルなウェブトラフィックを通します。こ" +"れらのルールは必要に応じて厳しくすることも緩めることも可能です。トラフィック" +"のマッチングと最後の行が確認される前の ``iptables`` での ``ACCEPT`` による処" +"理により提供されます。" + +#: ../../admin/transparent-proxy/router-inline.en.rst:61 +msgid "" +"Once the flows are marked we can force them to be delivered locally via the " +"loopback interface via a policy routing table." +msgstr "" +"一度フローに印が付けられると、それらがポリシールーティングテーブルによりルー" +"プバックインターフェースで内部的に配送されることを強制できます。" + +#: ../../admin/transparent-proxy/router-inline.en.rst:69 +msgid "" +"The marking used is arbitrary but it must be consistent between " +"``iptables`` and the routing rule. The table number must be in the range " +"1..253." +msgstr "" +"使用済みという印付けは独断的に行われますが、``iptables`` とルーティングルー" +"ルとの間で一貫していなければなりません。テーブル番号は 1 から 253 の間でなけ" +"ればなりません。" + +#: ../../admin/transparent-proxy/router-inline.en.rst:73 +msgid "" +"To configure Traffic Server set the following values in :file:`records." 
+"config`" +msgstr "" +"Traffic Server を設定するために次の値を :file:`records.config` に設定してく" +"ださい。" + +#: ../../admin/transparent-proxy/router-inline.en.rst:78 +msgid "``proxy.config.http.server_port``" +msgstr "``proxy.config.http.server_port``" + +#: ../../admin/transparent-proxy/router-inline.en.rst:81 +msgid "``STRING`` Default: ``=``" +msgstr "``STRING`` デフォルト: ``=``" + +#: ../../admin/transparent-proxy/router-inline.en.rst:86 +msgid "``proxy.config.reverse_proxy.enabled``" +msgstr "``proxy.config.reverse_proxy.enabled``" + +#: ../../admin/transparent-proxy/router-inline.en.rst:85 +msgid "``INT`` Default: ``1``" +msgstr "``INT`` デフォルト: ``1``" + +#: ../../admin/transparent-proxy/router-inline.en.rst:90 +msgid "``proxy.config.url_remap.remap_required``" +msgstr "``proxy.config.url_remap.remap_required``" + +#: ../../admin/transparent-proxy/router-inline.en.rst:89 +msgid "``INT`` Default: ``0``" +msgstr "``INT`` デフォルト: ``0``" + +#: ../../admin/transparent-proxy/router-inline.en.rst:77 +msgid "``STRING`` Default: *value from* ``--on-port``" +msgstr "``STRING`` デフォルト: ``--on-port`` *の値*" + +#: ../../admin/transparent-proxy/router-inline.en.rst:82 +msgid "``proxy.config.http.server_port_attr``" +msgstr "``proxy.config.http.server_port_attr``" diff --git a/doc/locale/ja/LC_MESSAGES/admin/transparent-proxy/wccp-configuration.en.po b/doc/locale/ja/LC_MESSAGES/admin/transparent-proxy/wccp-configuration.en.po new file mode 100644 index 00000000..fd242a3b --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/admin/transparent-proxy/wccp-configuration.en.po @@ -0,0 +1,300 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-11-08 17:57+0900\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../admin/transparent-proxy/wccp-configuration.en.rst:2 +msgid "WCCP Configuration" +msgstr "WCCP 設定" + +#: ../../admin/transparent-proxy/wccp-configuration.en.rst:25 +msgid "" +"`WCCP `_ is de-facto semi-standard used by routers to redirect network " +"traffic to caches. It is available on most Cisco™ routers although it does " +"not appear to be officially supported by Cisco. The primary benefits of " +"WCCP are" +msgstr "" +"`WCCP `_ はルーターがネットワークトラフィックをキャッシュに向ける準デファクト" +"スタンダードです。Cisco™ から公式にサポートされているようには見えないもの" +"の、多くの Cisco のルーターで利用可能です。WCCP の主な利益は次のとおりです。" + +#: ../../admin/transparent-proxy/wccp-configuration.en.rst:33 +msgid "" +"WCCP fails open so that if the Traffic Server machine fails, it is bypassed " +"and users continue to have Internet access." +msgstr "" +"Traffic Server マシンが失敗するように WCCP がオープンに失敗すると、それはバ" +"イパスされユーザーはインターネットへのアクセスを継続できます。" + +#: ../../admin/transparent-proxy/wccp-configuration.en.rst:42 +msgid "" +"Because the router serves as the inline network element, Traffic Server " +"must run on a separate host. 
This host can be located anywhere as long as " +"Traffic Server is either on the same network segment or a GRE tunnel can be " +"maintained between the Traffic Server host and the router." +msgstr "" +"ルーターはインラインのネットワーク要素として動くので、Traffic Server は別の" +"ホスト上で動作しなければ成りません。このホストは Traffic Server が同じネット" +"ワークセグメント上にあるか Traffic Server のホストとルーターとの間で GRE ト" +"ンネルが維持可能であればどこに置くこともできます。" + +#: ../../admin/transparent-proxy/wccp-configuration.en.rst:47 +msgid "" +"|important| This document presumes that the router is already properly " +"configured to handle traffic between the clients and the origin servers. If " +"you are not certain, verify it before attempting to configure Traffic " +"Server with WCCP. This is also a good state to which to revert should the " +"configuration go badly." +msgstr "" +"|important| このドキュメントはルータがクライアントとオリジンサーバーとの間の" +"トラッフィクを扱えるように正しく設定済みであることを仮定しています。もし確か" +"でない場合は、WCCP を使うよう Traffic Server を設定する前に確認してくださ" +"い。設定がおかしくなった際に元に戻すのも良い心がけです。" + +#: ../../admin/transparent-proxy/wccp-configuration.en.rst:54 +msgid "Configuration overview" +msgstr "設定の概要" + +#: ../../admin/transparent-proxy/wccp-configuration.en.rst:56 +msgid "" +"Setting WCCP is a three step process, first configuring the router, the " +"Traffic Server host, and Traffic Server." +msgstr "" +"WCCP の設定は、ルーターの設定、Traffic Server ホスト、そして Traffic Server " +"の 3 ステップです。" + +#: ../../admin/transparent-proxy/wccp-configuration.en.rst:59 +msgid "" +"|image1| The router will **not** respond to WCCP protocol packets unless " +"explicitly configured to do so. Via WCCP, the router can be made to perform " +"packet interception and redirection needed by Traffic Server transparency. " +"The WCCP protocol in effect acts as means of controlling a rough form of " +"policy routing with positive heartbeat cutoff." 
+msgstr ""
+"|image1| ルーターは明示的に設定をしない限り WCCP プロトコルのパケットに応答"
+"\ *しません*\ 。WCCP では、ルーターがパケットの横取りと Traffic Server の"
+"透過性に必要とされる向き先の変更を行うように設定されます。WCCP プロトコルは"
+"事実上、positive heartbeat cutoff によりポリシールーティングのだいたいの形を"
+"制御する方法として振る舞います。"
+
+#: ../../admin/transparent-proxy/wccp-configuration.en.rst:65
+msgid ""
+"The Traffic Server host system must also be configured using ``iptables`` "
+"to accept connections on foreign addresses. This is done roughly the same "
+"way as the standard transparency use."
+msgstr ""
+"Traffic Server のホストシステムは外部アドレスの接続を受け付けるために "
+"``iptables`` を使うようにも設定されなければ成りません。これは標準的な透過性"
+"の使用とだいたい同じ方法で行えます。"
+
+#: ../../admin/transparent-proxy/wccp-configuration.en.rst:69
+msgid ""
+"Finally Traffic Server itself must be configured for transparency and use "
+"of WCCP. The former is again very similar to the standard use, while WCCP "
+"configuration is specific to WCCP and uses a separate configuration file, "
+"referred to by the :file:`records.config` file."
+msgstr ""
+"最後に Traffic Server 自体が透過性と WCCP を使用するように設定されなければな"
+"りません。構成方法もまた標準的な使用とよく似ており、WCCP の設定は WCCP に特"
+"化していて分離された設定ファイルを使用しますが、:file:`records.config` ファ"
+"イルにより参照されます。"
+
+#: ../../admin/transparent-proxy/wccp-configuration.en.rst:74
+msgid ""
+"The primary concern for configuration in which of three basic topologies "
+"are to be used."
+msgstr "三種類のどの基本トポロジーが使用されるかによる設定における主な懸念。"
+
+#: ../../admin/transparent-proxy/wccp-configuration.en.rst:77
+msgid ""
+"Dedicated -- Traffic Server traffic goes over an interface that is not used "
+"for client nor origin server traffic."
+msgstr ""
+"専用 -- Traffic Server のトラフィックがクライアントにもオリジンサーバーのト"
+"ラフィックにも使われていないインターフェースに渡ります。"
+
+#: ../../admin/transparent-proxy/wccp-configuration.en.rst:79
+msgid ""
+"Shared -- Traffic Server traffic shares an interface with client or server "
+"traffic."
+msgstr "" +"共有 -- Traffic Server のトラッフィクがクライアントまたはサーバーのトラ" +"フィックとインターフェースを共有します。" + +#: ../../admin/transparent-proxy/wccp-configuration.en.rst:81 +msgid "Inside Shared -- Traffic Server and client traffic share an interface." +msgstr "" +"内部共有 -- Traffic Server とクライアントのトラフィックがインターフェースを" +"共有します。" + +#: ../../admin/transparent-proxy/wccp-configuration.en.rst:83 +msgid "" +"`Outside Shared `_ -- Traffic Server and origin " +"server traffic share an interface." +msgstr "" +"`外部共有 `_ -- Traffic Server とオリジンサーバー" +"のトラフィックがインターフェースを共有します。" + +#: ../../admin/transparent-proxy/wccp-configuration.en.rst:92 +msgid "Shared interface issues" +msgstr "共有インターフェースの課題" + +#: ../../admin/transparent-proxy/wccp-configuration.en.rst:94 +msgid "" +"A shared interface topology has additional issues compared to a dedicated " +"topology that must be handled. Such a topology is required if the router " +"has only two interfaces, and because of these additional issues it is " +"normally only used in such cases, although nothing prevents it use even if " +"the router has three or more interfaces." +msgstr "" +"共有インターフェーストポロジーは専用トポロジーと比べると扱わなければならない" +"追加の課題があります。このようなトポロジーはルーターが二つのインターフェース" +"しか持っていない場合に必要となり、ルーターが三つ以上のインターフェースを持っ" +"ている場合でもその使用を妨げるものはありませんが、これらの追加の課題があるの" +"で通常はそのようなケースでしか使用されません。" + +#: ../../admin/transparent-proxy/wccp-configuration.en.rst:110 +msgid "Frequently encountered problems" +msgstr "よくぶつかる問題" + +#: ../../admin/transparent-proxy/wccp-configuration.en.rst:113 +msgid "MTU and fragmentation" +msgstr "MTU と断片化" + +#: ../../admin/transparent-proxy/wccp-configuration.en.rst:115 +msgid "" +"In most cases the basic configuration using a tunnel in any topology can " +"fail due to issues with fragmentation. The socket logic is unable to know " +"that its packets will eventually be put in to a tunnel which will by its " +"nature have a smaller `MTU `_ than the physical interface which it uses. 
" +"This can lead to pathological behavior or outright failure as the packets " +"sent are just a little too big. It is not possible to solve easily by " +"changing the MTU on the physical interface because the tunnel interface " +"uses that to compute its own MTU." +msgstr "" +"ほとんどのケースではどのトポロジーでもトンネルを使用する基本設定は断片化の課" +"題により失敗します。ソケットのロジックはパケットがいずれ物理インターフェース" +"が使用するものより小さい `MTU `_ をもったトンネルに入るということを知ることがで" +"きません。これは送信したパケットがほんの少し大きすぎるのと同様に異常な振る舞" +"いや完全な失敗を引き起こします。トンネルインターフェースは自身の MTU を計算" +"するために物理インターフェース上の MTU を使用するのでそれを変更することで簡" +"単に解決することはできません。" + +#: ../../admin/transparent-proxy/wccp-configuration.en.rst:126 +msgid "References" +msgstr "参考文献" + +#: ../../admin/transparent-proxy/wccp-configuration.en.rst:128 +msgid "" +"`WCCP Router Configuration Commands - IOS 12.2 `_" +msgstr "" +"`WCCP Router Configuration Commands - IOS 12.2 `_" + +#: ../../admin/transparent-proxy/wccp-configuration.en.rst:139 +msgid "" +"Server side transparency should also be used, but it is not as significant. " +"In its absence, however, origin servers may see the source address of " +"connections suddenly change from the Traffic Server address to client " +"addresses, which could cause problems. Further, the primary reason for not " +"having server side transparency is to hide client addresses which is " +"defeated if the Traffic Server host fails." +msgstr "" +"サーバー側の透過性も使用されるべきですが、重要なことではありません。それがな" +"い場合であっても、オリジンサーバーは問題を起こすであろう突然 Traffic Server " +"のアドレスからクライアントのアドレスへと変わる接続元のアドレスに遭遇するで" +"しょう。また、サーバー側の透過性を持たない主な理由は、Traffic Server ホスト" +"が動かなくなったときに使えなくなるクライアントのアドレスを隠すためです。" + +#: ../../admin/transparent-proxy/wccp-configuration.en.rst:147 +msgid "If your router has only one interface, it's hardly a *router*." 
+msgstr "" +"ルーターが一つのインターフェースしかもっていないなら、それはまったく\\ *ルー" +"ター*\\ ではありません。" + +#: ../../admin/transparent-proxy/wccp-configuration.en.rst:150 +msgid "" +"This is not fundamentally impossible, as the packets are distinct in layer" +msgstr "パケットはレイヤー内では別々なので、本質的には不可能ではありません。" + +#: ../../admin/transparent-proxy/wccp-configuration.en.rst:36 +msgid "" +"Use of WCCP only makes sense for client side transparency [1]_ because if " +"the clients are explicitly proxied by Traffic Server there's no benefit to " +"WCCP fail open, as the clients will continue to directly access the " +"unresponsive Traffic Server host. It would be better to adjust the routing " +"tables on the router for explicit proxying." +msgstr "" +"クライアントが Traffic Server によって明示的にプロキシーされている場合には " +"WCCP の fail open に対して利益がなく、クライアントは応答しない Traffic " +"Server ホストに直接アクセスできるので WCCP の使用はクライアント側の透過性で" +"のみ意味をなします [1]_ 。ルーター上のルーティングテーブルを明示的なプロキ" +"シーのために調整すると良いでしょう。" + +#: ../../admin/transparent-proxy/wccp-configuration.en.rst:86 +msgid "" +"In general the dedicated topology is preferred. However, if the router has " +"only two interfaces one of the shared topologies will be required [2]_ " +"Click the links above for more detailed configuration information on a " +"specific topology." +msgstr "" +"一般的には専用トポロジーが好まれます。しかしながら、ルーターが二つしかイン" +"ターフェースを持っていない場合は二つの共有トポロジーのうちの一つが必要となり" +"ます [2]_ 。特定のトポロジーでのより詳細な設定情報については上記のリンクをク" +"リックしてください。" + +#: ../../admin/transparent-proxy/wccp-configuration.en.rst:100 +msgid "" +"The basic concept for a shared interface is to use a tunnel to simulate the " +"dedicated interface case. This enables the packets to be distinguished at " +"layer 3. For this reason, layer 2 redirection cannot be used because the " +"WCCP configuration cannot distinguish between packets returning from the " +"origin server and packets returning from Traffic Server as they are " +"distinguished only by layer 2 addressing [3]_. 
Fortunately the GRE tunnel " +"used for packet forwarding and return can be used as the simulated " +"interface for Traffic Server." +msgstr "" +"共有インターフェースのための基本的なコンセプトは専用インターフェースのケース" +"をシミュレートするためにトンネルを使うことです。これはパケットがレイヤー 3 " +"で識別できるようにします。この理由のために、オリジンサーバーから帰ってくるパ" +"ケットと Traffic Server から帰ってくるパケットの識別はレイヤー 2 のアドレス" +"でのみでき [3]_ WCCP の設定ではできないので、レイヤー 2 での向き先の変更は使" +"うことができません。幸いパケットの転送と返送に使われる GRE トンネルが " +"Traffic Server のためにシミュレートされたインターフェースとして使用できま" +"す。" + +#: ../../admin/transparent-proxy/wccp-configuration.en.rst:31 +msgid "" +"If already have a router that supports WCCP inline you do not have to " +"change your network topology." +msgstr "" +"もしすでに WCCP インラインに対応したルーターを持っているのならネットワー" +"クトポロジを変更する必要はありません。" diff --git a/doc/locale/ja/LC_MESSAGES/admin/working-log-files.en.po b/doc/locale/ja/LC_MESSAGES/admin/working-log-files.en.po new file mode 100644 index 00000000..88662ba5 --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/admin/working-log-files.en.po @@ -0,0 +1,1457 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-11-08 17:57+0900\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../admin/working-log-files.en.rst:4 +msgid "Working with Log Files" +msgstr "" + +#: ../../admin/working-log-files.en.rst:24 +msgid "" +"Traffic Server generates log files that contain information about every " +"request it receives and every error it detects." +msgstr "" + +#: ../../admin/working-log-files.en.rst:27 +msgid "This chapter discusses the following topics:" +msgstr "" + +#: ../../admin/working-log-files.en.rst:37 +msgid "Understanding Traffic Server Log Files" +msgstr "" + +#: ../../admin/working-log-files.en.rst:39 +msgid "" +"Traffic Server records information about every transaction (or request) it " +"processes and every error it detects in log files. Traffic Server keeps " +"three types of log files:" +msgstr "" + +#: ../../admin/working-log-files.en.rst:43 +msgid "" +"**Error log files** record information about why a particular transaction " +"was in error." +msgstr "" + +#: ../../admin/working-log-files.en.rst:46 +msgid "" +"**Event log files** (also called **access log files**) record information " +"about the state of each transaction Traffic Server processes." +msgstr "" + +#: ../../admin/working-log-files.en.rst:50 +msgid "" +"**System log files** record system information, including messages about " +"the state of Traffic Server and errors/warnings it produces. This kind of " +"information might include a note that event log files were rolled, a " +"warning that cluster communication timed out, or an error indicating that " +"Traffic Server was restarted." 
+msgstr "" + +#: ../../admin/working-log-files.en.rst:56 +msgid "" +"All system information messages are logged with the system-wide logging " +"facility :manpage:`syslog` under the daemon facility. The :manpage:`syslog." +"conf(5)` configuration file (stored in the ``/etc`` directory) specifies " +"where these messages are logged. A typical location is ``/var/log/" +"messages`` (Linux)." +msgstr "" + +#: ../../admin/working-log-files.en.rst:62 +msgid "" +"The :manpage:`syslog(8)` process works on a system-wide basis, so it serves " +"as the single repository for messages from all Traffic Server processes " +"(including :program:`traffic_server`, :program:`traffic_manager`, and :" +"program:`traffic_cop`)." +msgstr "" + +#: ../../admin/working-log-files.en.rst:67 +msgid "" +"System information logs observe a static format. Each log entry in the log " +"contains information about the date and time the error was logged, the " +"hostname of the Traffic Server that reported the error, and a description " +"of the error or warning." +msgstr "" + +#: ../../admin/working-log-files.en.rst:72 +msgid "" +"Refer to :ref:`traffic-server-error-messages` for a list of the messages " +"logged by Traffic Server." +msgstr "" + +#: ../../admin/working-log-files.en.rst:75 +msgid "" +"By default, Traffic Server creates both error and event log files and " +"records system information in system log files. 
You can disable event " +"logging and/or error logging by setting the configuration variable :ts:cv:" +"`proxy.config.log.logging_enabled` (in the :file:`records.config` file) to " +"one of the following values:" +msgstr "" + +#: ../../admin/working-log-files.en.rst:81 +msgid "``0`` to disable both event and error logging" +msgstr "" + +#: ../../admin/working-log-files.en.rst:82 +msgid "``1`` to enable error logging only" +msgstr "" + +#: ../../admin/working-log-files.en.rst:83 +msgid "``2`` to enable transaction logging only" +msgstr "" + +#: ../../admin/working-log-files.en.rst:84 +msgid "``3`` to enable both transaction and error logging" +msgstr "" + +#: ../../admin/working-log-files.en.rst:87 +msgid "Understanding Event Log Files" +msgstr "" + +#: ../../admin/working-log-files.en.rst:89 +msgid "" +"Event log files record information about every request that Traffic Server " +"processes. By analyzing the log files, you can determine how many people " +"use the Traffic Server cache, how much information each person requested, " +"what pages are most popular, and so on. Traffic Server supports several " +"standard log file formats, such as Squid and Netscape, as well as user-" +"defined custom formats. You can analyze the standard format log files with " +"off-the-shelf analysis packages. To help with log file analysis, you can " +"separate log files so they contain information specific to protocol or " +"hosts. You can also configure Traffic Server to roll log files " +"automatically at specific intervals during the day or when they reach a " +"certain size." 
+msgstr "" + +#: ../../admin/working-log-files.en.rst:101 +msgid "" +"The following sections describe the Traffic Server logging system features " +"and discuss how to:" +msgstr "" + +#: ../../admin/working-log-files.en.rst:104 +msgid "**Manage your event log files**" +msgstr "" + +#: ../../admin/working-log-files.en.rst:106 +msgid "" +"You can choose a central location for storing log files, set how much disk " +"space to use for log files, and set how and when to roll log files. Refer " +"to `Managing Event Log Files`_." +msgstr "" + +#: ../../admin/working-log-files.en.rst:110 +msgid "**Choose different event log file formats**" +msgstr "" + +#: ../../admin/working-log-files.en.rst:112 +msgid "" +"You can choose which standard log file formats you want to use for traffic " +"analysis, such as Squid or Netscape. Alternatively, you can use the Traffic " +"Server custom format, which is XML-based and enables you to institute more " +"control over the type of information recorded in log files. Refer to " +"`Choosing Event Log File Formats`_." +msgstr "" + +#: ../../admin/working-log-files.en.rst:118 +msgid "**Roll event log files automatically**" +msgstr "" + +#: ../../admin/working-log-files.en.rst:120 +msgid "" +"Configure Traffic Server to roll event log files at specific intervals " +"during the day or when they reach a certain size; this enables you to " +"identify and manipulate log files that are no longer active. Refer to " +"`Rolling Event Log Files`_." +msgstr "" + +#: ../../admin/working-log-files.en.rst:125 +msgid "**Separate log files according to protocols and hosts**" +msgstr "" + +#: ../../admin/working-log-files.en.rst:127 +msgid "" +"Configure Traffic Server to create separate log files for different " +"protocols. You can also configure Traffic Server to generate separate log " +"files for requests served by different hosts. Refer to `Splitting Event Log " +"Files`_." 
+msgstr "" + +#: ../../admin/working-log-files.en.rst:131 +msgid "**Collate log files from different Traffic Server nodes**" +msgstr "" + +#: ../../admin/working-log-files.en.rst:133 +msgid "" +"Designate one or more nodes on the network to serve as log collation " +"servers. These servers, which might be standalone or part of Traffic " +"Server, enable you to keep all logged information in well-defined " +"locations. Refer to `Collating Event Log Files`_." +msgstr "" + +#: ../../admin/working-log-files.en.rst:138 +msgid "**View statistics about the logging system**" +msgstr "" + +#: ../../admin/working-log-files.en.rst:140 +msgid "" +"Traffic Server provides statistics about the logging system; you can access " +"these statistics via Traffic Line. Refer to `Viewing Logging Statistics`_." +msgstr "" + +#: ../../admin/working-log-files.en.rst:143 +msgid "**Interpret log file entries for the log file formats**" +msgstr "" + +#: ../../admin/working-log-files.en.rst:145 +msgid "Refer to `Example Event Log File Entries`_." +msgstr "" + +#: ../../admin/working-log-files.en.rst:148 +msgid "Managing Event Log Files" +msgstr "" + +#: ../../admin/working-log-files.en.rst:150 +msgid "" +"Traffic Server enables you to control where event log files are located and " +"how much space they can consume. Additionally you can specify how to handle " +"low disk space in the logging directory." +msgstr "" + +#: ../../admin/working-log-files.en.rst:155 +msgid "Choosing the Logging Directory" +msgstr "" + +#: ../../admin/working-log-files.en.rst:157 +msgid "" +"By default, Traffic Server writes all event log files in the ``logs`` " +"directory located in the directory where you installed Traffic Server. To " +"use a different directory, refer to `Setting Log File Management Options`_." 
+msgstr "" + +#: ../../admin/working-log-files.en.rst:162 +msgid "Controlling Logging Space" +msgstr "" + +#: ../../admin/working-log-files.en.rst:164 +msgid "" +"Traffic Server enables you to control the amount of disk space that the " +"logging directory can consume. This allows the system to operate smoothly " +"within a specified space window for a long period of time. After you " +"establish a space limit, Traffic Server continues to monitor the space in " +"the logging directory. When the free space dwindles to the headroom limit " +"(see `Setting Log File Management Options`_), it enters a low space state " +"and takes the following actions:" +msgstr "" + +#: ../../admin/working-log-files.en.rst:172 +msgid "" +"If the autodelete option (discussed in `Rolling Event Log Files`_) is " +"*enabled*, then Traffic Server identifies previously-rolled log files (i." +"e., log files with the ``.old`` extension). It starts deleting files one by " +"one, beginning with the oldest file, until it emerges from the low state. " +"Traffic Server logs a record of all deleted files in the system error log." +msgstr "" + +#: ../../admin/working-log-files.en.rst:179 +msgid "" +"If the autodelete option is *disabled* or there are not enough old log " +"files to delete for the system to emerge from its low space state, then " +"Traffic Server issues a warning and continues logging until space is " +"exhausted. When available space is consumed, event logging stops. Traffic " +"Server resumes event logging when enough space becomes available for it to " +"exit the low space state. To make space available, either explicitly " +"increase the logging space limit or remove files from the logging directory " +"manually." +msgstr "" + +#: ../../admin/working-log-files.en.rst:188 +msgid "" +"You can run a :manpage:`cron(8)` script in conjunction with Traffic Server " +"to automatically remove old log files from the logging directory before " +"Traffic Server enters the low space state. 
Relocate the old log files to a " +"temporary partition, where you can run a variety of log analysis scripts. " +"Following analysis, either compress the logs and move to an archive " +"location, or simply delete them." +msgstr "" + +#: ../../admin/working-log-files.en.rst:199 +msgid "Setting Log File Management Options" +msgstr "" + +#: ../../admin/working-log-files.en.rst:201 +msgid "To set log management options, follow the steps below:" +msgstr "" + +#: ../../admin/working-log-files.en.rst:203 +#: ../../admin/working-log-files.en.rst:238 +#: ../../admin/working-log-files.en.rst:576 +#: ../../admin/working-log-files.en.rst:660 +msgid "In the :file:`records.config` file, edit the following variables" +msgstr "" + +#: ../../admin/working-log-files.en.rst:205 +#: ../../admin/working-log-files.en.rst:808 +msgid ":ts:cv:`proxy.config.log.logfile_dir`" +msgstr "" + +#: ../../admin/working-log-files.en.rst:206 +msgid ":ts:cv:`proxy.config.log.max_space_mb_for_logs`" +msgstr "" + +#: ../../admin/working-log-files.en.rst:207 +msgid ":ts:cv:`proxy.config.log.max_space_mb_headroom`" +msgstr "" + +#: ../../admin/working-log-files.en.rst:209 +#: ../../admin/working-log-files.en.rst:267 +#: ../../admin/working-log-files.en.rst:583 +#: ../../admin/working-log-files.en.rst:665 +#: ../../admin/working-log-files.en.rst:696 +#: ../../admin/working-log-files.en.rst:767 +#: ../../admin/working-log-files.en.rst:834 +msgid "" +"Run the command :option:`traffic_line -x` to apply the configuration " +"changes." +msgstr "" + +#: ../../admin/working-log-files.en.rst:213 +msgid "Choosing Event Log File Formats" +msgstr "" + +#: ../../admin/working-log-files.en.rst:215 +msgid "Traffic Server supports the following log file formats:" +msgstr "" + +#: ../../admin/working-log-files.en.rst:217 +msgid "" +"Standard formats, such as Squid or Netscape; refer to `Using Standard " +"Formats`_." 
+msgstr "" + +#: ../../admin/working-log-files.en.rst:218 +msgid "The Traffic Server custom format; refer to `Using the Custom Format`_." +msgstr "" + +#: ../../admin/working-log-files.en.rst:220 +msgid "" +"In addition to the standard and custom log file format, you can choose " +"whether to save log files in binary or ASCII; refer to `Choosing Binary or " +"ASCII`_." +msgstr "" + +#: ../../admin/working-log-files.en.rst:223 +msgid "" +"Event log files consume substantial disk space. Creating log entries in " +"multiple formats at the same time can consume disk resources very quickly " +"and adversely impact Traffic Server performance." +msgstr "" + +#: ../../admin/working-log-files.en.rst:228 +msgid "Using Standard Formats" +msgstr "" + +#: ../../admin/working-log-files.en.rst:230 +msgid "" +"The standard log formats include Squid, Netscape Common, Netscape Extended, " +"and Netscape Extended-2. The standard log file formats can be analyzed with " +"a wide variety of off-the-shelf log-analysis packages. You should use one " +"of the standard event log formats unless you need information that these " +"formats do not provide. Refer to `Using the Custom Format`_." 
+msgstr "" + +#: ../../admin/working-log-files.en.rst:236 +msgid "Set standard log file format options by following the steps below:" +msgstr "" + +#: ../../admin/working-log-files.en.rst:239 +msgid "Edit the following variables to use the Squid format:" +msgstr "" + +#: ../../admin/working-log-files.en.rst:241 +msgid ":ts:cv:`proxy.config.log.squid_log_enabled`" +msgstr "" + +#: ../../admin/working-log-files.en.rst:242 +msgid ":ts:cv:`proxy.config.log.squid_log_is_ascii`" +msgstr "" + +#: ../../admin/working-log-files.en.rst:243 +msgid ":ts:cv:`proxy.config.log.squid_log_name`" +msgstr "" + +#: ../../admin/working-log-files.en.rst:244 +msgid ":ts:cv:`proxy.config.log.squid_log_header`" +msgstr "" + +#: ../../admin/working-log-files.en.rst:246 +msgid "To use the Netscape Common format, edit the following variables:" +msgstr "" + +#: ../../admin/working-log-files.en.rst:248 +msgid ":ts:cv:`proxy.config.log.common_log_enabled`" +msgstr "" + +#: ../../admin/working-log-files.en.rst:249 +msgid ":ts:cv:`proxy.config.log.common_log_is_ascii`" +msgstr "" + +#: ../../admin/working-log-files.en.rst:250 +msgid ":ts:cv:`proxy.config.log.common_log_name`" +msgstr "" + +#: ../../admin/working-log-files.en.rst:251 +msgid ":ts:cv:`proxy.config.log.common_log_header`" +msgstr "" + +#: ../../admin/working-log-files.en.rst:253 +msgid "To use the Netscape Extended format, edit the following variables:" +msgstr "" + +#: ../../admin/working-log-files.en.rst:255 +msgid ":ts:cv:`proxy.config.log.extended_log_enabled`" +msgstr "" + +#: ../../admin/working-log-files.en.rst:256 +msgid ":ts:cv:`proxy.config.log.extended_log_is_ascii`" +msgstr "" + +#: ../../admin/working-log-files.en.rst:257 +msgid ":ts:cv:`proxy.config.log.extended_log_name`" +msgstr "" + +#: ../../admin/working-log-files.en.rst:258 +msgid ":ts:cv:`proxy.config.log.extended_log_header`" +msgstr "" + +#: ../../admin/working-log-files.en.rst:260 +msgid "To use the Netscape Extended-2 format, edit the following variables:" 
+msgstr "" + +#: ../../admin/working-log-files.en.rst:262 +msgid ":ts:cv:`proxy.config.log.extended2_log_enabled`" +msgstr "" + +#: ../../admin/working-log-files.en.rst:263 +msgid ":ts:cv:`proxy.config.log.extended2_log_is_ascii`" +msgstr "" + +#: ../../admin/working-log-files.en.rst:264 +msgid ":ts:cv:`proxy.config.log.extended2_log_name`" +msgstr "" + +#: ../../admin/working-log-files.en.rst:265 +msgid ":ts:cv:`proxy.config.log.extended2_log_header`" +msgstr "" + +#: ../../admin/working-log-files.en.rst:275 +msgid "Using the Custom Format" +msgstr "" + +#: ../../admin/working-log-files.en.rst:277 +msgid "" +"The XML-based custom log format is more flexible than the standard log file " +"formats and gives you more control over the type of information recorded in " +"log files. You should create a custom log format if you need data for " +"analysis that's not available in the standard formats. You can decide what " +"information to record for each Traffic Server transaction and create " +"filters that specify which transactions to log." +msgstr "" + +#: ../../admin/working-log-files.en.rst:284 +msgid "" +"The heart of the XML-based custom logging feature is the XML-based logging " +"configuration file (:file:`logs_xml.config`) that enables you to create " +"very modular descriptions of logging objects. The :file:`logs_xml.config` " +"file uses three types of objects to create custom log files, as detailed " +"below. To generate a custom log format, you must specify at least one " +"``LogObject`` definition (one log file is produced for each ``LogObject`` " +"definition)." +msgstr "" + +#: ../../admin/working-log-files.en.rst:292 +msgid "" +"The ``LogFormat`` object defines the content of the log file using printf-" +"style format strings." +msgstr "" + +#: ../../admin/working-log-files.en.rst:294 +msgid "" +"The ``LogFilter`` object defines a filter so that you include or exclude " +"certain information from the log file." 
+msgstr "" + +#: ../../admin/working-log-files.en.rst:296 +msgid "" +"The ``LogObject`` object specifies all the information needed to produce a " +"log file." +msgstr "" + +#: ../../admin/working-log-files.en.rst:299 +msgid "The name of the log file. (required)" +msgstr "" + +#: ../../admin/working-log-files.en.rst:300 +msgid "" +"The format to be used (required). This can be a standard format (Squid or " +"Netscape) or" +msgstr "" + +#: ../../admin/working-log-files.en.rst:302 +msgid "" +"a previously-defined custom format (i.e., a previously-defined " +"``LogFormat`` object)." +msgstr "" + +#: ../../admin/working-log-files.en.rst:304 +msgid "" +"The file mode: ``ASCII``, ``Binary``, or ``ASCII_PIPE``. The default is " +"``ASCII``. The ``ASCII_PIPE`` mode writes log entries to a UNIX-named pipe " +"(a buffer in memory); other processes can then read the data using standard " +"I/O functions. The advantage of this option is that Traffic Server does not " +"have to write to disk, which frees disk space and bandwidth for other " +"tasks. When the buffer is full, Traffic Server drops log entries and issues " +"an error message indicating how many entries were dropped. Because Traffic " +"Server only writes complete log entries to the pipe, only full records are " +"dropped." +msgstr "" + +#: ../../admin/working-log-files.en.rst:315 +msgid "" +"Any filters you want to use (i.e., previously-defined ``LogFilter`` " +"objects)." +msgstr "" + +#: ../../admin/working-log-files.en.rst:317 +msgid "The collation servers that are to receive the log files." +msgstr "" + +#: ../../admin/working-log-files.en.rst:318 +msgid "" +"The protocols you want to log. If the protocols tag is used, then Traffic " +"Server will only log transactions from the protocols listed; otherwise, all " +"transactions for all protocols are logged." +msgstr "" + +#: ../../admin/working-log-files.en.rst:321 +msgid "" +"The origin servers you want to log. 
If the ``servers`` tag is used, then " +"Traffic Server will only log transactions for the origin servers listed; " +"otherwise, transactions for all origin servers are logged." +msgstr "" + +#: ../../admin/working-log-files.en.rst:325 +msgid "" +"The header text you want the log files to contain. The header text appears " +"at the beginning of the log file, just before the first record." +msgstr "" + +#: ../../admin/working-log-files.en.rst:328 +msgid "The log file rolling options." +msgstr "" + +#: ../../admin/working-log-files.en.rst:330 +msgid "In order to accomplish this, we" +msgstr "" + +#: ../../admin/working-log-files.en.rst:332 +msgid "edit the following variables in the :file:`records.config` file:" +msgstr "" + +#: ../../admin/working-log-files.en.rst:334 +msgid ":ts:cv:`proxy.config.log.custom_logs_enabled`" +msgstr "" + +#: ../../admin/working-log-files.en.rst:335 +msgid "In the :file:`logs_xml.config` file" +msgstr "" + +#: ../../admin/working-log-files.en.rst:337 +msgid "" +"Add :ref:`LogFormat`, :ref:`LogFilters`, and :ref:`LogObject` " +"specifications to the configuration file." +msgstr "" + +#: ../../admin/working-log-files.en.rst:339 +msgid "Save and close the :file:`logs_xml.config` file." +msgstr "" + +#: ../../admin/working-log-files.en.rst:340 +msgid "" +"Run the command :option:`traffic_line -x` to apply your configuration " +"changes." +msgstr "" + +#: ../../admin/working-log-files.en.rst:344 +msgid "Creating Summary Log Files" +msgstr "" + +#: ../../admin/working-log-files.en.rst:346 +msgid "" +"Traffic Server performs several hundred operations per second; therefore, " +"event log files can quickly grow to large sizes. Using SQL-like aggregate " +"operators, you can configure Traffic Server to create summary log files " +"that summarize a set of log entries over a specified period of time. This " +"can significantly reduce the size of the log files generated." 
+msgstr "" + +#: ../../admin/working-log-files.en.rst:353 +msgid "" +"To generate a summary log file, create a :ref:`LogFormat` object in the XML-" +"based logging configuration file (:file:`logs_xml.config`) using the SQL-" +"like aggregate operators below. You can apply each of these operators to " +"specific fields, over a specified interval." +msgstr "" + +#: ../../admin/working-log-files.en.rst:359 +msgid "``COUNT``" +msgstr "" + +#: ../../admin/working-log-files.en.rst:360 +msgid "``SUM``" +msgstr "" + +#: ../../admin/working-log-files.en.rst:361 +msgid "``AVERAGE``" +msgstr "" + +#: ../../admin/working-log-files.en.rst:362 +msgid "``FIRST``" +msgstr "" + +#: ../../admin/working-log-files.en.rst:363 +msgid "``LAST``" +msgstr "" + +#: ../../admin/working-log-files.en.rst:365 +msgid "To create a summary log file format, we" +msgstr "" + +#: ../../admin/working-log-files.en.rst:367 +msgid "" +"Define the format of the log file in :file:`logs_xml.config` as follows:" +msgstr "" + +#: ../../admin/working-log-files.en.rst:380 +msgid "" +"where ``operator`` is one of the five aggregate operators (``COUNT``, " +"``SUM``, ``AVERAGE``, ``FIRST``, ``LAST``), ``field`` is the logging field " +"you want to aggregate, and ``n`` is the interval (in seconds) between " +"summary log entries. You can specify more than one ``operator`` in the " +"format line. For more information, refer to :file:`logs_xml.config`." +msgstr "" + +#: ../../admin/working-log-files.en.rst:387 +msgid "" +"Run the command :option:`traffic_line -x` to apply configuration changes." +msgstr "" + +#: ../../admin/working-log-files.en.rst:389 +msgid "" +"The following example format generates one entry every 10 seconds. 
Each " +"entry contains the timestamp of the last entry of the interval, a count of " +"the number of entries seen within that 10-second interval, and the sum of " +"all bytes sent to the client: ::" +msgstr "" + +#: ../../admin/working-log-files.en.rst:402 +msgid "" +"You cannot create a format specification that contains both aggregate " +"operators and regular fields. For example, the following specification " +"would be **invalid**: ::" +msgstr "" + +#: ../../admin/working-log-files.en.rst:409 +msgid "Choosing Binary or ASCII" +msgstr "" + +#: ../../admin/working-log-files.en.rst:411 +msgid "" +"You can configure Traffic Server to create event log files in either of the " +"following:" +msgstr "" + +#: ../../admin/working-log-files.en.rst:414 +msgid "**ASCII**" +msgstr "" + +#: ../../admin/working-log-files.en.rst:416 +msgid "" +"These files are human-readable and can be processed using standard, off-the-" +"shelf log analysis tools. However, Traffic Server must perform additional " +"processing to create the files in ASCII, which mildly impacts system " +"overhead. ASCII files also tend to be larger than the equivalent binary " +"files. By default, ASCII log files have a ``.log`` filename extension." +msgstr "" + +#: ../../admin/working-log-files.en.rst:423 +msgid "**Binary**" +msgstr "" + +#: ../../admin/working-log-files.en.rst:425 +msgid "" +"These files generate lower system overhead and generally occupy less space " +"on the disk than ASCII files (depending on the type of information being " +"logged). However, you must use a converter application before you can read " +"or analyze binary files via standard tools. By default, binary log files " +"use a ``.blog`` filename extension." +msgstr "" + +#: ../../admin/working-log-files.en.rst:432 +msgid "" +"While binary log files typically require less disk space, there are " +"exceptions." 
+msgstr "" + +#: ../../admin/working-log-files.en.rst:435 +msgid "" +"For example: the value ``0`` (zero) requires only one byte to store in " +"ASCII, but requires four bytes when stored as a binary integer. Conversely: " +"if you define a custom format that logs IP addresses, then a binary log " +"file would only require four bytes of storage per 32-bit address. However, " +"the same IP address stored in dot notation would require around 15 " +"characters (bytes) in an ASCII log file. Therefore, it's wise to consider " +"the type of data that will be logged before you select ASCII or binary for " +"your log files. For example, you might try logging for one day using ASCII " +"and then another day using binary. If the number of requests is roughly the " +"same for both days, then you can calculate a rough metric that compares the " +"two formats." +msgstr "" + +#: ../../admin/working-log-files.en.rst:447 +msgid "" +"For standard log formats, select Binary or ASCII (refer to `Setting " +"Standard Log File Format Options`). For the custom log format, specify " +"ASCII or Binary mode in the :ref:`LogObject` (refer to :ref:`Using the " +"Custom Format `). In addition to the ASCII and " +"binary options, you can also write custom log entries to a UNIX-named pipe " +"(i.e., a buffer in memory). Other processes can then read the data using " +"standard I/O functions. The advantage of using this option is that Traffic " +"Server does not have to write to disk, which frees disk space and bandwidth " +"for other tasks. In addition, writing to a pipe does not stop when logging " +"space is exhausted because the pipe does not use disk space. Refer to :file:" +"`logs_xml.config` for more information about the ``ASCII_PIPE`` option." +msgstr "" + +#: ../../admin/working-log-files.en.rst:463 +msgid "Rolling Event Log Files" +msgstr "" + +#: ../../admin/working-log-files.en.rst:465 +msgid "" +"Traffic Server provides automatic log file rolling. 
This means that at " +"specific intervals during the day or when log files reach a certain size, " +"Traffic Server closes its current set of log files and opens new log files. " +"Depending on the amount of traffic your servers are exposed to, you should " +"roll log files several times a day. Rolling every six hours is a good " +"guideline to start with." +msgstr "" + +#: ../../admin/working-log-files.en.rst:472 +msgid "Log file rolling offers the following benefits:" +msgstr "" + +#: ../../admin/working-log-files.en.rst:474 +msgid "It defines an interval over which log analysis can be performed." +msgstr "" + +#: ../../admin/working-log-files.en.rst:475 +msgid "" +"It keeps any single log file from becoming too large and helps to keep the " +"logging system within the specified space limits." +msgstr "" + +#: ../../admin/working-log-files.en.rst:477 +msgid "" +"It provides an easy way to identify files that are no longer being used so " +"that an automated script can clean the logging directory and run log " +"analysis programs." +msgstr "" + +#: ../../admin/working-log-files.en.rst:482 +msgid "Rolled Log Filename Format" +msgstr "" + +#: ../../admin/working-log-files.en.rst:484 +msgid "" +"Traffic Server provides a consistent naming scheme for rolled log files " +"that enables you to easily identify log files. When Traffic Server rolls a " +"log file, it saves and closes the old file before it starts a new file. " +"Traffic Server renames the old file to include the following information:" +msgstr "" + +#: ../../admin/working-log-files.en.rst:490 +msgid "The format of the file (such as ``squid.log``)." +msgstr "" + +#: ../../admin/working-log-files.en.rst:491 +msgid "The hostname of the Traffic Server that generated the log file." +msgstr "" + +#: ../../admin/working-log-files.en.rst:492 +msgid "" +"Two timestamps separated by a hyphen (``-``). The first timestamp is a " +"**lower bound** for the timestamp of the first record in the log file. 
The " +"lower bound is the time when the new buffer for log records is created. " +"Under low load, the first timestamp in the filename can be different from " +"the timestamp of the first entry. Under normal load, the first timestamp in " +"the filename and the timestamp of the first entry are similar. The second " +"timestamp is an **upper bound** for the timestamp of the last record in the " +"log file (this is normally the rolling time)." +msgstr "" + +#: ../../admin/working-log-files.en.rst:501 +msgid "" +"The suffix ``.old``, which makes it easy for automated scripts to find " +"rolled log files." +msgstr "" + +#: ../../admin/working-log-files.en.rst:504 +msgid "Timestamps have the following format: ::" +msgstr "" + +#: ../../admin/working-log-files.en.rst:508 +msgid "The following table describes the format:" +msgstr "" + +#: ../../admin/working-log-files.en.rst:511 +msgid "``%Y``" +msgstr "" + +#: ../../admin/working-log-files.en.rst:511 +msgid "The year in four-digit format. For example: 2000." +msgstr "" + +#: ../../admin/working-log-files.en.rst:514 +#: ../../admin/working-log-files.en.rst:523 +msgid "``%M``" +msgstr "" + +#: ../../admin/working-log-files.en.rst:514 +msgid "The month in two-digit format, from 01-12. For example: 07." +msgstr "" + +#: ../../admin/working-log-files.en.rst:517 +msgid "``%D``" +msgstr "" + +#: ../../admin/working-log-files.en.rst:517 +msgid "The day in two-digit format, from 01-31. For example: 19." +msgstr "" + +#: ../../admin/working-log-files.en.rst:520 +msgid "``%H``" +msgstr "" + +#: ../../admin/working-log-files.en.rst:520 +msgid "The hour in two-digit format, from 00-23. For example: 21." +msgstr "" + +#: ../../admin/working-log-files.en.rst:523 +msgid "The minute in two-digit format, from 00-59. For example: 52." +msgstr "" + +#: ../../admin/working-log-files.en.rst:526 +msgid "``%S``" +msgstr "" + +#: ../../admin/working-log-files.en.rst:526 +msgid "The second in two-digit format, from 00-59. For example: 36." 
+msgstr "" + +#: ../../admin/working-log-files.en.rst:528 +msgid "The following is an example of a rolled log filename: ::" +msgstr "" + +#: ../../admin/working-log-files.en.rst:532 +msgid "" +"The logging system buffers log records before writing them to disk. When a " +"log file is rolled, the log buffer might be partially full. If it is, then " +"the first entry in the new log file will have a timestamp earlier than the " +"time of rolling. When the new log file is rolled, its first timestamp will " +"be a lower bound for the timestamp of the first entry." +msgstr "" + +#: ../../admin/working-log-files.en.rst:538 +msgid "" +"For example, suppose logs are rolled every three hours, and the first " +"rolled log file is: ::" +msgstr "" + +#: ../../admin/working-log-files.en.rst:543 +msgid "" +"If the lower bound for the first entry in the log buffer at 3:00:00 is " +"2:59:47, then the next log file will have the following timestamp when " +"rolled: ::" +msgstr "" + +#: ../../admin/working-log-files.en.rst:549 +msgid "" +"The contents of a log file are always between the two timestamps. Log files " +"do not contain overlapping entries, even if successive timestamps appear to " +"overlap." +msgstr "" + +#: ../../admin/working-log-files.en.rst:554 +msgid "Rolling Intervals" +msgstr "" + +#: ../../admin/working-log-files.en.rst:556 +msgid "" +"Log files are rolled at specific intervals relative to a given hour of the " +"day. Two options control when log files are rolled:" +msgstr "" + +#: ../../admin/working-log-files.en.rst:559 +msgid "The offset hour, which is an hour between 0 (midnight) and 23" +msgstr "" + +#: ../../admin/working-log-files.en.rst:560 +msgid "The rolling interval" +msgstr "" + +#: ../../admin/working-log-files.en.rst:562 +msgid "" +"Both the offset hour and the rolling interval determine when log file " +"rolling starts. Rolling occurs every rolling interval and at the offset " +"hour. 
For example, if the rolling interval is six hours and the offset hour " +"is 0 (midnight), then the logs will roll at midnight (00:00), 06:00, 12:00, " +"and 18:00 each day. If the rolling interval is 12 hours and the offset hour " +"is 3, then logs will roll at 03:00 and 15:00 each day." +msgstr "" + +#: ../../admin/working-log-files.en.rst:571 +msgid "Setting Log File Rolling Options" +msgstr "" + +#: ../../admin/working-log-files.en.rst:573 +msgid "" +"To set log file rolling options and/or configure Traffic Server to roll log " +"files when they reach a certain size, follow the steps below:" +msgstr "" + +#: ../../admin/working-log-files.en.rst:578 +msgid ":ts:cv:`proxy.config.log.rolling_enabled`" +msgstr "" + +#: ../../admin/working-log-files.en.rst:579 +msgid ":ts:cv:`proxy.config.log.rolling_size_mb`" +msgstr "" + +#: ../../admin/working-log-files.en.rst:580 +msgid ":ts:cv:`proxy.config.log.rolling_offset_hr`" +msgstr "" + +#: ../../admin/working-log-files.en.rst:581 +msgid ":ts:cv:`proxy.config.log.rolling_interval_sec`" +msgstr "" + +#: ../../admin/working-log-files.en.rst:586 +msgid "" +"You can fine-tune log file rolling settings for a custom log file in the :" +"ref:`LogObject` specification in the :file:`logs_xml.config` file. The " +"custom log file uses the rolling settings in its :ref:`LogObject`, which " +"override the default settings you specify in Traffic Manager or the :file:" +"`records.config` file described above." +msgstr "" + +#: ../../admin/working-log-files.en.rst:596 +msgid "Splitting Event Log Files" +msgstr "" + +#: ../../admin/working-log-files.en.rst:598 +msgid "" +"By default, Traffic Server uses standard log formats and generates log " +"files that contain HTTP & ICP transactions in the same file. However, you " +"can enable log splitting if you prefer to log transactions for different " +"protocols in separate log files." 
+msgstr "" + +#: ../../admin/working-log-files.en.rst:604 +msgid "ICP Log Splitting" +msgstr "" + +#: ../../admin/working-log-files.en.rst:606 +msgid "" +"When ICP log splitting is enabled, Traffic Server records ICP transactions " +"in a separate log file with a name that contains ``icp``. For example: if " +"you enable the Squid format, then all ICP transactions are recorded in the " +"``squid-icp.log`` file. When you disable ICP log splitting, Traffic Server " +"records all ICP transactions in the same log file as HTTP transactions." +msgstr "" + +#: ../../admin/working-log-files.en.rst:614 +msgid "HTTP Host Log Splitting" +msgstr "" + +#: ../../admin/working-log-files.en.rst:616 +msgid "" +"HTTP host log splitting enables you to record HTTP transactions for " +"different origin servers in separate log files. When HTTP host log " +"splitting is enabled, Traffic Server creates a separate log file for each " +"origin server that's listed in the :file:`log_hosts.config` file. When both " +"ICP and HTTP host log splitting are enabled, Traffic Server generates " +"separate log files for HTTP transactions (based on the origin server) and " +"places all ICP transactions in their own respective log files. For example, " +"if the :file:`log_hosts.config` file contains the two origin servers ``uni." 
+"edu`` and ``company.com`` and Squid format is enabled, then Traffic Server " +"generates the following log files:" +msgstr "" + +#: ../../admin/working-log-files.en.rst:628 +#: ../../admin/working-log-files.en.rst:644 +msgid "``squid-uni.edu.log``" +msgstr "" + +#: ../../admin/working-log-files.en.rst:628 +msgid "All HTTP transactions for ``uni.edu``" +msgstr "" + +#: ../../admin/working-log-files.en.rst:631 +#: ../../admin/working-log-files.en.rst:647 +msgid "``squid-company.com.log``" +msgstr "" + +#: ../../admin/working-log-files.en.rst:631 +msgid "All HTTP transactions for ``company.com``" +msgstr "" + +#: ../../admin/working-log-files.en.rst:634 +msgid "``squid-icp.log``" +msgstr "" + +#: ../../admin/working-log-files.en.rst:634 +msgid "All ICP transactions for all hosts" +msgstr "" + +#: ../../admin/working-log-files.en.rst:637 +#: ../../admin/working-log-files.en.rst:650 +msgid "``squid.log``" +msgstr "" + +#: ../../admin/working-log-files.en.rst:637 +msgid "All HTTP transactions for other hosts" +msgstr "" + +#: ../../admin/working-log-files.en.rst:639 +msgid "" +"If you disable ICP log splitting, then ICP transactions are placed in the " +"same log file as HTTP transactions. Using the hosts and log format from the " +"previous example, Traffic Server generates the log files below:" +msgstr "" + +#: ../../admin/working-log-files.en.rst:644 +msgid "All entries for ``uni.edu``" +msgstr "" + +#: ../../admin/working-log-files.en.rst:647 +msgid "All entries for ``company.com``" +msgstr "" + +#: ../../admin/working-log-files.en.rst:650 +msgid "All other entries" +msgstr "" + +#: ../../admin/working-log-files.en.rst:652 +msgid "" +"Traffic Server also enables you to create XML-based :ref:`Custom Log " +"Formats ` that offer even greater control over " +"log file generation." 
+msgstr "" + +#: ../../admin/working-log-files.en.rst:656 +msgid "Setting Log Splitting Options" +msgstr "" + +#: ../../admin/working-log-files.en.rst:658 +msgid "To set log splitting options, follow the steps below:" +msgstr "" + +#: ../../admin/working-log-files.en.rst:662 +msgid ":ts:cv:`proxy.config.log.separate_icp_logs`" +msgstr "" + +#: ../../admin/working-log-files.en.rst:663 +msgid ":ts:cv:`proxy.config.log.separate_host_logs`" +msgstr "" + +#: ../../admin/working-log-files.en.rst:669 +msgid "Editing the log_hosts.config File" +msgstr "" + +#: ../../admin/working-log-files.en.rst:671 +msgid "" +"The default :file:`log_hosts.config` file is located in the Traffic Server " +"``config`` directory. To record HTTP transactions for different origin " +"servers in separate log files, you must specify the hostname of each origin " +"server on a separate line in the :file:`log_hosts.config` file. For " +"example, if you specify the keyword sports, then Traffic Server records all " +"HTTP transactions from ``sports.yahoo.com`` and ``www.foxsports.com`` in a " +"log file called ``squid-sports.log`` (if the Squid format is enabled)." +msgstr "" + +#: ../../admin/working-log-files.en.rst:682 +msgid "" +"If Traffic Server is clustered and you enable log file collation, then you " +"should use the same :file:`log_hosts.config` file on every Traffic Server " +"node in the cluster." +msgstr "" + +#: ../../admin/working-log-files.en.rst:686 +msgid "To edit the :file:`log_hosts.config` file follow the steps below:" +msgstr "" + +#: ../../admin/working-log-files.en.rst:688 +msgid "" +"In the :file:`log_hosts.config` file, enter the hostname of each origin " +"server on a separate line in the file, e.g.: ::" +msgstr "" + +#: ../../admin/working-log-files.en.rst:700 +msgid "Collating Event Log Files" +msgstr "" + +#: ../../admin/working-log-files.en.rst:702 +msgid "" +"You can use the Traffic Server log file collation feature to collect all " +"logged information in one place. 
Log collation enables you to analyze a set " +"of Traffic Server clustered nodes as a whole (rather than as individual " +"nodes) and to use a large disk that might only be located on one of the " +"nodes in the cluster. Traffic Server collates log files by using one or " +"more nodes as log collation servers and all remaining nodes as log " +"collation clients. When a Traffic Server node generates a buffer of event " +"log entries, it first determines if it is the collation server or a " +"collation client. The collation server node writes all log buffers to its " +"local disk, just as it would if log collation was not enabled. Log " +"collation servers can be standalone or they can be part of a node running " +"Traffic Server." +msgstr "" + +#: ../../admin/working-log-files.en.rst:715 +msgid "" +"The collation client nodes prepare their log buffers for transfer across " +"the network and send the buffers to the log collation server. When the log " +"collation server receives a log buffer from a client, it writes it to its " +"own log file as if it was generated locally. For a visual representation of " +"this, see the figure below." +msgstr "" + +#: ../../admin/working-log-files.en.rst:725 +msgid "Log collation" +msgstr "" + +#: ../../admin/working-log-files.en.rst:727 +msgid "" +"If log clients cannot contact their log collation server, then they write " +"their log buffers to their local disks, into *orphan* log files. Orphan log " +"files require manual collation." +msgstr "" + +#: ../../admin/working-log-files.en.rst:733 +msgid "" +"Log collation can have an impact on network performance. Because all nodes " +"are forwarding their log data buffers to the single collation server, a " +"bottleneck can occur. In addition, collated log files contain timestamp " +"information for each entry, but entries in the files do not appear in " +"strict chronological order. You may want to sort collated log files before " +"doing analysis." 
+msgstr "" + +#: ../../admin/working-log-files.en.rst:740 +msgid "" +"To configure Traffic Server to collate event log files, you must perform " +"the following tasks:" +msgstr "" + +#: ../../admin/working-log-files.en.rst:743 +msgid "" +"Either `Configure Traffic Server Node to Be a Collation Server `_ or install & configure a " +"`Standalone Collator `_" +msgstr "" + +#: ../../admin/working-log-files.en.rst:746 +msgid "" +"`Configure Traffic Server Nodes to Be a Collation Clients `_." +msgstr "" + +#: ../../admin/working-log-files.en.rst:748 +msgid "" +"Add an attribute to the :ref:`LogObject` specification in the :file:" +"`logs_xml.config` file if you are using custom log file formats; refer to " +"`Collating Custom Event Log Files`_." +msgstr "" + +#: ../../admin/working-log-files.en.rst:756 +msgid "Configuring Traffic Server to Be a Collation Server" +msgstr "" + +#: ../../admin/working-log-files.en.rst:758 +msgid "" +"To configure a Traffic Server node to be a collation server, simply edit a " +"configuration file via the steps below." +msgstr "" + +#: ../../admin/working-log-files.en.rst:761 +msgid "In the :file:`records.config` file, edit the following variables" +msgstr "" + +#: ../../admin/working-log-files.en.rst:763 +msgid ":ts:cv:`proxy.local.log.collation_mode` (``1`` for server mode)" +msgstr "" + +#: ../../admin/working-log-files.en.rst:764 +#: ../../admin/working-log-files.en.rst:829 +msgid ":ts:cv:`proxy.config.log.collation_port`" +msgstr "" + +#: ../../admin/working-log-files.en.rst:765 +#: ../../admin/working-log-files.en.rst:830 +msgid ":ts:cv:`proxy.config.log.collation_secret`" +msgstr "" + +#: ../../admin/working-log-files.en.rst:772 +msgid "" +"If you modify the ``collation_port`` or ``secret`` after connections " +"between the collation server and collation clients have been established, " +"then you must restart Traffic Server." 
+msgstr "" + +#: ../../admin/working-log-files.en.rst:777 +msgid "Using a Standalone Collator" +msgstr "" + +#: ../../admin/working-log-files.en.rst:779 +msgid "" +"If you do not want the log collation server to be a Traffic Server node, " +"then you can install and configure a standalone collator (SAC) that will " +"dedicate more of its power to collecting, processing, and writing log files." +msgstr "" + +#: ../../admin/working-log-files.en.rst:784 +msgid "To install and configure a standalone collator:" +msgstr "" + +#: ../../admin/working-log-files.en.rst:786 +msgid "" +"Configure your Traffic Server nodes as log collation clients; refer to " +"`Configuring Traffic Server to Be a Collation Client " +"<#ConfiguringTSCollationClient>`_." +msgstr "" + +#: ../../admin/working-log-files.en.rst:789 +msgid "" +"Copy the :program:`traffic_sac` binary from the Traffic Server ``bin`` " +"directory and" +msgstr "" + +#: ../../admin/working-log-files.en.rst:791 +msgid "" +"Copy the ``libtsutil.so`` libraries from the Traffic Server ``lib`` " +"directory to the machine serving as the standalone collator." +msgstr "" + +#: ../../admin/working-log-files.en.rst:793 +msgid "" +"Create a directory called ``config`` in the directory that contains the :" +"program:`traffic_sac` binary." +msgstr "" + +#: ../../admin/working-log-files.en.rst:795 +msgid "" +"Create a directory called ``internal`` in the ``config`` directory you " +"created in Step 4 (above). This directory is used internally by the " +"standalone collator to store lock files." +msgstr "" + +#: ../../admin/working-log-files.en.rst:798 +msgid "" +"Copy the :file:`records.config` file from a Traffic Server node configured " +"to be a log collation client to the ``config`` directory you created in " +"Step 4 on the standalone collator. The :file:`records.config` file contains " +"the log collation secret and the port you specified when configuring " +"Traffic Server nodes to be collation clients. 
The collation port and secret " +"must be the same for all collation clients and servers." +msgstr "" + +#: ../../admin/working-log-files.en.rst:805 +msgid "In the :file:`records.config` file, edit the following variable" +msgstr "" + +#: ../../admin/working-log-files.en.rst:810 +msgid "Enter the following command::" +msgstr "" + +#: ../../admin/working-log-files.en.rst:815 +msgid "Configuring Traffic Server to Be a Collation Client" +msgstr "" + +#: ../../admin/working-log-files.en.rst:817 +msgid "" +"To configure a Traffic Server node to be a collation client, follow the " +"steps below. If you modify the ``collation_port`` or ``secret`` after " +"connections between the collation clients and the collation server have " +"been established, then you must restart Traffic Server." +msgstr "" + +#: ../../admin/working-log-files.en.rst:822 +msgid "In the :file:`records.config` file, edit the following variables:" +msgstr "" + +#: ../../admin/working-log-files.en.rst:824 +msgid "" +":ts:cv:`proxy.local.log.collation_mode`: ``2`` to configure this node as " +"log collation client and send standard formatted log entries to the " +"collation server. For XML-based formatted log entries, see :file:`logs_xml." +"config` file; refer to :ref:`Using the Custom Format `." +msgstr "" + +#: ../../admin/working-log-files.en.rst:828 +msgid ":ts:cv:`proxy.config.log.collation_host`" +msgstr "" + +#: ../../admin/working-log-files.en.rst:831 +msgid ":ts:cv:`proxy.config.log.collation_host_tagged`" +msgstr "" + +#: ../../admin/working-log-files.en.rst:832 +msgid ":ts:cv:`proxy.config.log.max_space_mb_for_orphan_logs`" +msgstr "" + +#: ../../admin/working-log-files.en.rst:838 +msgid "Collating Custom Event Log Files" +msgstr "" + +#: ../../admin/working-log-files.en.rst:840 +msgid "" +"If you use custom event log files, then you must edit the :file:`logs_xml." +"config` file (in addition to configuring a collation server and collation " +"clients)." 
+msgstr "" + +#: ../../admin/working-log-files.en.rst:844 +msgid "To collate custom event log files" +msgstr "" + +#: ../../admin/working-log-files.en.rst:846 +msgid "On each collation client, edit the `:file:`logs_xml.config`" +msgstr "" + +#: ../../admin/working-log-files.en.rst:847 +msgid "" +"Add the :ref:`CollationHost ` attribute " +"to the :ref:`LogObject` specification: ::" +msgstr "" + +#: ../../admin/working-log-files.en.rst:858 +msgid "" +"where ``ipaddress`` is the hostname or IP address of the collation server " +"to which all log entries (for this object) are forwarded, and ``port`` is " +"the port number for communication between the collation server and " +"collation clients." +msgstr "" + +#: ../../admin/working-log-files.en.rst:863 +msgid "" +"Run the command :option:`traffic_line -L` to restart Traffic Server on the " +"local node or :option:`traffic_line -M` to restart Traffic Server on all " +"the nodes in a cluster." +msgstr "" + +#: ../../admin/working-log-files.en.rst:868 +msgid "Viewing Logging Statistics" +msgstr "" + +#: ../../admin/working-log-files.en.rst:870 +msgid "" +"Traffic Server generates logging statistics that enable you to see the " +"following information:" +msgstr "" + +#: ../../admin/working-log-files.en.rst:873 +msgid "How many log files (formats) are currently being written." +msgstr "" + +#: ../../admin/working-log-files.en.rst:874 +msgid "" +"The current amount of space used by the logging directory, which contains " +"all event and error logs." +msgstr "" + +#: ../../admin/working-log-files.en.rst:876 +msgid "" +"The number of access events written to log files since Traffic Server " +"installation. This counter represents one entry in one file; if multiple " +"formats are being written, then a single event creates multiple event log " +"entries." +msgstr "" + +#: ../../admin/working-log-files.en.rst:880 +msgid "" +"The number of access events skipped (because they were filtered) since " +"Traffic Server installation." 
+msgstr "" + +#: ../../admin/working-log-files.en.rst:882 +msgid "" +"The number of access events written to the event error log since Traffic " +"Server installation." +msgstr "" + +#: ../../admin/working-log-files.en.rst:885 +msgid "" +"You can retrieve the statistics via the Traffic Line command-line " +"interface; refer to `Monitoring Traffic <../monitoring-traffic>`_." +msgstr "" + +#: ../../admin/working-log-files.en.rst:889 +msgid "Viewing Log Files" +msgstr "" + +#: ../../admin/working-log-files.en.rst:891 +msgid "" +"You can view the system, event, and error log files Traffic Server creates. " +"You can also delete a log file or copy it to your local system if you have " +"the correct user permissions. Traffic Server displays only one MB of " +"information in the log file. If the log file you select to view is bigger " +"than 1MB, then Traffic Server truncates the file and displays a warning " +"message indicating that the file is too big." +msgstr "" + +#: ../../admin/working-log-files.en.rst:899 +msgid "Online Event Log XML Builder" +msgstr "" + +#: ../../admin/working-log-files.en.rst:901 +msgid "" +"If you need any assistance building your event log, you can try out our " +"`online log builder `_. This " +"is a work in progress, so any comments, critique or suggestions are most " +"welcome." +msgstr "" diff --git a/doc/locale/ja/LC_MESSAGES/admin/working-log-files/log-formats.en.po b/doc/locale/ja/LC_MESSAGES/admin/working-log-files/log-formats.en.po new file mode 100644 index 00000000..039b5bf6 --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/admin/working-log-files/log-formats.en.po @@ -0,0 +1,545 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-11-08 17:57+0900\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../admin/working-log-files/log-formats.en.rst:4 +msgid "Log Formats" +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:26 +msgid "Squid Format" +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:32 +#: ../../admin/working-log-files/log-formats.en.rst:115 +msgid "``1``" +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:29 +msgid "" +"``cqtq`` The client request timestamp in Squid format; the time of the " +"client request in seconds since January 1, 1970 UTC (with millisecond " +"resolution)." +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:39 +#: ../../admin/working-log-files/log-formats.en.rst:119 +msgid "``2``" +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:35 +msgid "" +"``ttms`` The time Traffic Server spent processing the client request; the " +"number of milliseconds between the time the client established the " +"connection with Traffic Server and the time Traffic Server sent the last " +"byte of the response back to the client." 
+msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:43 +#: ../../admin/working-log-files/log-formats.en.rst:124 +msgid "``3``" +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:42 +#: ../../admin/working-log-files/log-formats.en.rst:114 +msgid "``chi`` The IP address of the client’s host machine." +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:51 +#: ../../admin/working-log-files/log-formats.en.rst:128 +msgid "``4``" +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:46 +msgid "" +"``crc/pssc`` The cache result code; how the cache responded to the request: " +"``HIT``, ``MISS``, and so on. Cache result codes are described :ref:`here " +"`. The proxy response status code (the HTTP " +"response status code from Traffic Server to client)." +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:56 +#: ../../admin/working-log-files/log-formats.en.rst:132 +msgid "``5``" +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:54 +msgid "" +"``psql`` The length of the Traffic Server response to the client in bytes, " +"including headers and content." +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:60 +#: ../../admin/working-log-files/log-formats.en.rst:136 +msgid "``6``" +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:59 +msgid "``cqhm`` The client request method: ``GET``, ``POST``, and so on." +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:67 +#: ../../admin/working-log-files/log-formats.en.rst:140 +msgid "``7``" +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:63 +msgid "" +"``cquc`` The client request canonical URL; blanks and other characters that " +"might not be parsed by log analysis tools are replaced by escape sequences. " +"The escape sequence is a percentage sign followed by the ASCII code number " +"of the replaced character in hex." 
+msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:72 +#: ../../admin/working-log-files/log-formats.en.rst:147 +msgid "``8``" +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:70 +msgid "" +"``caun`` The username of the authenticated client. A hyphen (``-``) means " +"that no authentication was required." +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:81 +#: ../../admin/working-log-files/log-formats.en.rst:152 +msgid "``9``" +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:75 +msgid "" +"``phr/pqsn`` The proxy hierarchy route; the route Traffic Server used to " +"retrieve the object." +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:79 +msgid "" +"The proxy request server name; the name of the server that fulfilled the " +"request. If the request was a cache hit, then this field contains a hyphen " +"(``-``)." +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:86 +#: ../../admin/working-log-files/log-formats.en.rst:157 +msgid "``10``" +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:84 +msgid "" +"``psct`` The proxy response content type; the object content type taken " +"from the Traffic Server response header." +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:88 +msgid "The following figure shows a sample log entry in a ``squid.log`` file." +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:94 +msgid "Sample log entry in squid.log" +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:97 +msgid "Squid log in XML" +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:108 +msgid "Netscape Formats" +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:111 +#: ../../admin/working-log-files/log-formats.en.rst:218 +msgid "Netscape Common" +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:118 +msgid "``-`` This hyphen (``-``) is always present in Netscape log entries." 
+msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:122 +msgid "" +"``caun`` The authenticated client username. A hyphen (``-``) means no " +"authentication was required." +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:127 +msgid "" +"``cqtd`` The date and time of the client request, enclosed in brackets." +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:131 +msgid "``cqtx`` The request line, enclosed in quotes." +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:135 +msgid "``pssc`` The proxy response status code (HTTP reply code)." +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:139 +msgid "" +"``pscl`` The length of the Traffic Server response to the client in bytes." +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:143 +#: ../../admin/working-log-files/log-formats.en.rst:240 +msgid "Netscape Extended" +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:146 +msgid "``sssc`` The origin server response status code." +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:150 +msgid "" +"``sshl`` The server response transfer length; the body length in the origin " +"server response to Traffic Server, in bytes." +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:155 +msgid "" +"``cqbl`` The client request transfer length; the body length in the client " +"request to Traffic Server, in bytes." +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:162 +msgid "``11``" +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:160 +msgid "" +"``pqbl`` The proxy request transfer length; the body length in the Traffic " +"Server request to the origin server." 
+msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:167 +msgid "``12``" +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:165 +msgid "" +"``cqhl`` The client request header length; the header length in the client " +"request to Traffic Server." +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:172 +msgid "``13``" +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:170 +msgid "" +"``pshl`` The proxy response header length; the header length in the Traffic " +"Server response to the client." +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:177 +msgid "``14``" +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:175 +msgid "" +"``pqhl`` The proxy request header length; the header length in Traffic " +"Server request to the origin server." +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:182 +msgid "``15``" +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:180 +msgid "" +"``sshl`` The server response header length; the header length in the origin " +"server response to Traffic Server." +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:189 +msgid "``16``" +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:185 +msgid "" +"``tts`` The time Traffic Server spent processing the client request; the " +"number of seconds between the time that the client established the " +"connection with Traffic Server and the time that Traffic Server sent the " +"last byte of the response back to the client." +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:192 +#: ../../admin/working-log-files/log-formats.en.rst:263 +msgid "Netscape Extended2" +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:197 +msgid "``17``" +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:195 +msgid "" +"``phr`` The proxy hierarchy route; the route Traffic Server used to " +"retrieve the object." 
+msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:203 +msgid "``18``" +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:200 +msgid "" +"``cfsc`` The client finish status code: ``FIN`` if the client request " +"completed successfully or ``INTR`` if the client request was interrupted." +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:209 +msgid "``19``" +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:206 +msgid "" +"``pfsc`` The proxy finish status code: ``FIN`` if the Traffic Server " +"request to the origin server completed successfully or ``INTR`` if the " +"request was interrupted." +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:215 +msgid "``20``" +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:212 +msgid "" +"``crc`` The cache result code; how the Traffic Server cache responded to " +"the request: HIT, MISS, and so on. Cache result codes are described :ref:" +"`here `." +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:220 +msgid "" +"The following figure shows a sample log entry in a ``common.log`` file, the " +"list following describes the fields of the format." +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:227 +msgid "Sample log entry in common.log" +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:230 +msgid "Netscape Common in XML" +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:242 +msgid "" +"The following figure shows a sample log entry in an ``extended.log`` file." 
+msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:249 +msgid "sample log entry in extended.log" +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:252 +#: ../../admin/working-log-files/log-formats.en.rst:275 +msgid "Netscape Extended in XML" +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:265 +msgid "" +"The following figure shows a sample log entry in an ``extended2.log`` file." +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:272 +msgid "sample log entry in extended2.log" +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:288 +msgid "Squid- and Netscape-format: Cache Result Codes" +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:290 +msgid "" +"The following table describes the cache result codes in Squid and Netscape " +"log files." +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:295 +msgid "``TCP_HIT``" +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:294 +msgid "" +"A valid copy of the requested object was in the cache and Traffic Server " +"sent the object to the client." +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:300 +msgid "``TCP_MISS``" +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:298 +msgid "" +"The requested object was not in cache, so Traffic Server retrieved the " +"object from the origin server (or a parent proxy) and sent it to the client." +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:306 +msgid "``TCP_REFRESH_HIT``" +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:303 +msgid "" +"The object was in the cache, but it was stale. Traffic Server made an ``if-" +"modified-since`` request to the origin server and the origin server sent a " +"``304`` not-modified response. Traffic Server sent the cached object to the " +"client." 
+msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:312 +msgid "``TCP_REF_FAIL_HIT``" +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:309 +msgid "" +"The object was in the cache but was stale. Traffic Server made an ``if-" +"modified-since`` request to the origin server but the server did not " +"respond. Traffic Server sent the cached object to the client." +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:318 +msgid "``TCP_REFRESH_MISS``" +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:315 +msgid "" +"The object was in the cache but was stale. Traffic Server made an ``if-" +"modified-since`` request to the origin server and the server returned a new " +"object. Traffic Server served the new object to the client." +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:324 +msgid "``TCP_CLIENT_REFRESH``" +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:321 +msgid "" +"The client issued a request with a ``no-cache`` header. Traffic Server " +"obtained the requested object from the origin server and sent a copy to the " +"client. Traffic Server deleted the previous copy of the object from cache." +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:331 +msgid "``TCP_IMS_HIT``" +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:327 +msgid "" +"The client issued an ``if-modified-since`` request and the object was in " +"cache & fresher than the IMS date, **or** an ``if-modified-since`` request " +"to the origin server revealed the cached object was fresh. Traffic Server " +"served the cached object to the client." +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:338 +msgid "``TCP_IMS_MISS``" +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:334 +msgid "" +"The client issued an ``if-modified-since request``, and the object was " +"either not in cache or was stale in cache. 
Traffic Server sent an ``if-" +"modified-since request`` to the origin server and received the new object. " +"Traffic Server sent the updated object to the client." +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:342 +msgid "``TCP_SWAPFAIL``" +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:341 +msgid "" +"The object was in the cache but could not be accessed. The client did not " +"receive the object." +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:345 +msgid "``ERR_CLIENT_ABORT``" +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:345 +msgid "The client disconnected before the complete object was sent." +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:348 +msgid "``ERR_CONNECT_FAIL``" +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:348 +msgid "Traffic Server could not reach the origin server." +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:352 +msgid "``ERR_DNS_FAIL``" +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:351 +msgid "" +"The Domain Name Server (DNS) could not resolve the origin server name, or " +"no DNS could be reached." +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:356 +msgid "``ERR_INVALID_REQ``" +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:355 +msgid "" +"The client HTTP request was invalid. (Traffic Server forwards requests with " +"unknown methods to the origin server.)" +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:360 +msgid "``ERR_READ_TIMEOUT``" +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:359 +msgid "" +"The origin server did not respond to Traffic Server's request within the " +"timeout interval." +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:363 +msgid "``ERR_PROXY_DENIED``" +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:363 +msgid "Client service was denied." 
+msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:368 +msgid "``ERR_UNKNOWN``" +msgstr "" + +#: ../../admin/working-log-files/log-formats.en.rst:366 +msgid "" +"The client connected, but subsequently disconnected without sending a " +"request." +msgstr "" diff --git a/doc/locale/ja/LC_MESSAGES/arch/cache/cache-api.en.po b/doc/locale/ja/LC_MESSAGES/arch/cache/cache-api.en.po new file mode 100644 index 00000000..57605b71 --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/arch/cache/cache-api.en.po @@ -0,0 +1,63 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-11-08 17:57+0900\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../arch/cache/cache-api.en.rst:22 +msgid "Cache Related API functions" +msgstr "" + +#: ../../arch/cache/cache-api.en.rst:26 +msgid "" +"Set a *flag* that marks a request as cacheable. 
This is a positive override " +"only, setting *flag* to 0 restores the default behavior, it does not force " +"the request to be uncacheable." +msgstr "" + +#: ../../arch/cache/cache-api.en.rst:30 +msgid "" +"Set the cache key for the transaction *txnp* as the string pointed at by " +"*url* of *length* characters. It need not be ``null`` terminated. This " +"should be called from ``TS_HTTP_READ_REQUEST_HDR_HOOK`` which is before " +"cache lookup but late enough that the HTTP request header is available." +msgstr "" + +#: ../../arch/cache/cache-api.en.rst:34 +msgid "Cache Internals" +msgstr "" + +#: ../../arch/cache/cache-api.en.rst:38 +msgid "" +"A preprocessor macro which computes the maximum size of a fragment based on " +"the value of *big*. This is computed as if the argument where the value of " +"the *big* field in a struct :cpp:class:`Dir`." +msgstr "" + +#: ../../arch/cache/cache-api.en.rst:42 +msgid "" +"A preprocessor macro which computes the block size multiplier for a struct :" +"cpp:class:`Dir` where *big* is the *big* field value." +msgstr "" diff --git a/doc/locale/ja/LC_MESSAGES/arch/cache/cache-appendix.en.po b/doc/locale/ja/LC_MESSAGES/arch/cache/cache-appendix.en.po new file mode 100644 index 00000000..ea7092a5 --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/arch/cache/cache-appendix.en.po @@ -0,0 +1,253 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-11-08 17:57+0900\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../arch/cache/cache-appendix.en.rst:21 +msgid "Cache Tools" +msgstr "" + +#: ../../arch/cache/cache-appendix.en.rst:23 +msgid "Tools and techniques for cache monitoring and inspection." +msgstr "" + +#: ../../arch/cache/cache-appendix.en.rst:25 +msgid ":ref:`The cache inspector `." +msgstr "" + +#: ../../arch/cache/cache-appendix.en.rst:28 +msgid "Topics to be done" +msgstr "" + +#: ../../arch/cache/cache-appendix.en.rst:30 +msgid "Resident alternates" +msgstr "" + +#: ../../arch/cache/cache-appendix.en.rst:31 +msgid "Object refresh" +msgstr "" + +#: ../../arch/cache/cache-appendix.en.rst:34 +msgid "Cache Consistency" +msgstr "" + +#: ../../arch/cache/cache-appendix.en.rst:36 +msgid "" +"The cache is completely consistent, up to and including kicking the power " +"cord out, if the write buffer on consumer disk drives is disabled. You need " +"to use::" +msgstr "" + +#: ../../arch/cache/cache-appendix.en.rst:40 +msgid "" +"The cache validates that all the data for the document is available and " +"will silently mark a partial document as a \"miss\" on read. 
There is no " +"\"gentle\" shutdown for traffic server, you just kill the process, so the " +"\"recovery\" code (fsck) is run every time traffic server starts up." +msgstr "" + +#: ../../arch/cache/cache-appendix.en.rst:42 +msgid "" +"On startup the two versions of the index are checked, and the last valid " +"one is read into memory. Then traffic server moves forward from the last " +"snapped write cursor and reads all the fragments written to disk, and " +"updates the directory (as in a log-based file system). It stops reading at " +"the write before the last valid write header it sees (as a write is not " +"necessarily atomic because of sector reordering). Then the new updated " +"index is written to the invalid version (in case of a crash during startup) " +"and the system starts." +msgstr "" + +#: ../../arch/cache/cache-appendix.en.rst:47 +msgid "Volume Tagging" +msgstr "" + +#: ../../arch/cache/cache-appendix.en.rst:49 +msgid "" +"Currently cache volumes are allocated somewhat arbitrarily from storage " +"elements. `This enhancement `__ allows the :file:`storage.config` file to assign storage units " +"to specific volumes although the volumes must still be listed in :file:" +"`volume.config` in general and in particular to map domains to specific " +"volumes. A primary use case for this is to be able to map specific types of " +"content to different storage elements. This could to have different storage " +"devices for the content (SSD vs. rotational)." +msgstr "" + +#: ../../arch/cache/cache-appendix.en.rst:53 +msgid "Version Upgrade" +msgstr "" + +#: ../../arch/cache/cache-appendix.en.rst:55 +msgid "" +"It is currently the case that any change to the cache format will clear the " +"cache. This is an issue when upgrading the |TS| version and should be kept " +"in mind." 
+msgstr "" + +#: ../../arch/cache/cache-appendix.en.rst:61 +msgid "Controlling the cache key" +msgstr "" + +#: ../../arch/cache/cache-appendix.en.rst:63 +msgid "" +"The cache key is by default the URL of the request. There are two possible " +"choices, the original (\"pristine\") URL and the remapped URL. Which of " +"these is used is determined by the configuration value :ts:cv:`proxy." +"config.url_remap.pristine_host_hdr`." +msgstr "" + +#: ../../arch/cache/cache-appendix.en.rst:65 +msgid "" +"This is an ``INT`` value. If set to ``0`` (disabled) then the remapped URL " +"is used, and if it is not ``0`` (enabled) then the original URL is used. " +"This setting also controls the value of the ``HOST`` header that is placed " +"in the request sent to the origin server, using hostname from the original " +"URL if non-``0`` and the host name from the remapped URL if ``0``. It has " +"no other effects." +msgstr "" + +#: ../../arch/cache/cache-appendix.en.rst:67 +msgid "" +"For caching, this setting is irrelevant if no remapping is done or there is " +"a one to one mapping between the original and remapped URLs." +msgstr "" + +#: ../../arch/cache/cache-appendix.en.rst:69 +msgid "" +"It becomes significant if multiple original URLs are mapped to the same " +"remapped URL. If pristine headers are enabled requests to different " +"original URLs will be stored as distinct objects in the cache. If disabled " +"the remapped URL will be used and there may be collisions. This is bad if " +"the contents different but quite useful if they are the same (e.g., the " +"original URLs are just aliases for the same underlying server)." +msgstr "" + +#: ../../arch/cache/cache-appendix.en.rst:71 +msgid "" +"This is also an issue if a remapping is changed because it is effectively a " +"time axis version of the previous case. 
If an original URL is remapped to a " +"different server address then the setting determines if existing cached " +"objects will be served for new requests (enabled) or not (disabled). " +"Similarly if the original URL mapped to a particular URL is changed then " +"cached objects from the initial original URL will be served from the " +"updated original URL if pristine headers is disabled." +msgstr "" + +#: ../../arch/cache/cache-appendix.en.rst:77 +msgid "" +"A plugin that changes the cache key *must* do so consistently for both " +"cache hit and cache miss requests because two different requests that map " +"to the same cache key will be considered equivalent by the cache. Use of " +"the URL directly provides this and so must any substitute. This is entirely " +"the responsibility of the plugin, there is no way for the |TS| core to " +"detect such an occurrence." +msgstr "" + +#: ../../arch/cache/cache-appendix.en.rst:81 +msgid "" +"It is a requirement that the string be syntactically a URL but otherwise it " +"is completely arbitrary and need not have any path. For instance if the " +"company Network Geographics wanted to store certain content under its own " +"cache key, using a document GUID as part of the key, it could use a cache " +"key like ::" +msgstr "" + +#: ../../arch/cache/cache-appendix.en.rst:85 +msgid "" +"The scheme ``ngeo`` was picked because it is *not* a valid URL scheme and " +"so will not collide with any valid URL." +msgstr "" + +#: ../../arch/cache/cache-appendix.en.rst:87 +msgid "" +"This can be useful if the URL encodes both important and unimportant data. " +"Instead of storing potentially identical content under different URLs " +"(because they differ on the unimportant parts) a url containing only the " +"important parts could be created and used." +msgstr "" + +#: ../../arch/cache/cache-appendix.en.rst:89 +msgid "" +"For example, suppose the URL for Network Geographics content encoded both " +"the document GUID and a referral key. 
::" +msgstr "" + +#: ../../arch/cache/cache-appendix.en.rst:93 +msgid "" +"We don't want to the same content for every possible referrer. Instead we " +"could use a plugin to convert this to the previous example and requests " +"that differed only in the referrer key would all reference the same cache " +"entry. Note that we would also map ::" +msgstr "" + +#: ../../arch/cache/cache-appendix.en.rst:97 +msgid "" +"to the same cache key. This can be handy for \"sharing\" content between " +"servers when that content is identical. Note also the plugin can change the " +"cache key or not depending on any data in the request header, for instance " +"not changing the cache key if the request is not in the ``doc`` directory. " +"If distinguishing servers is important that can easily be pulled from the " +"request URL and used in the synthetic cache key. The implementor is free to " +"extract all relevant elements for use in the cache key." +msgstr "" + +#: ../../arch/cache/cache-appendix.en.rst:99 +msgid "" +"While there is explicit no requirement that the synthetic cache key be " +"based on the HTTP request header, in practice it is generally necessary due " +"to the consistency requirement. Because cache lookup happens before " +"attempting to connect to the origin server no data from the HTTP response " +"header is available, leaving only the request header. The most common case " +"is the one described above where the goal is to elide elements of the URL " +"that do not affect the content to minimize cache footprint and improve " +"cache hit rates." +msgstr "" + +#: ../../arch/cache/cache-appendix.en.rst:73 +msgid "" +"These collisions are not of themselves good or bad. An administrator needs " +"to decide which is appropriate for their situation and set the value " +"correspondingly." 
+msgstr "" + +#: ../../arch/cache/cache-appendix.en.rst:75 +msgid "" +"If a greater degree of control is desired a plugin must be used to invoke " +"the API call :c:func:`TSCacheUrlSet()` to provide a specific cache key. " +"The :c:func:`TSCacheUrlSet()` API can be called as early as " +"``TS_HTTP_READ_REQUEST_HDR_HOOK``, but no later than " +"``TS_HTTP_POST_REMAP_HOOK``. It can be called only once per transaction; " +"calling it multiple times has no additional effect." +msgstr "" + +#: ../../arch/cache/cache-appendix.en.rst:79 +msgid "" +"If :c:func:`TSHttpTxnCacheLookupUrlGet()` is called after new cache url set " +"by :c:func:`TSCacheUrlSet()`, it should use a URL location created by :c:" +"func:`TSUrlCreate()` as its 3rd input parameter instead of getting url_loc " +"from client request." +msgstr "" diff --git a/doc/locale/ja/LC_MESSAGES/arch/cache/cache-arch.en.po b/doc/locale/ja/LC_MESSAGES/arch/cache/cache-arch.en.po new file mode 100644 index 00000000..7a1a3094 --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/arch/cache/cache-arch.en.po @@ -0,0 +1,2106 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-11-08 17:57+0900\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../arch/cache/cache-arch.en.rst:19 +msgid "Cache Architecture" +msgstr "キャッシュアーキテクチャ" + +#: ../../arch/cache/cache-arch.en.rst:24 +msgid "Introduction" +msgstr "導入" + +#: ../../arch/cache/cache-arch.en.rst:31 +msgid "" +"The purpose of this document is to describe the basic structure and " +"implementation details of the |TS| cache. Configuration of the cache will " +"be discussed only to the extent needed to understand the internal " +"mechanisms. This document will be useful primarily to |TS| developers " +"working on the |TS| codebase or plugins for |TS|. It is assumed the reader " +"is already familiar with the :ref:`admin-guide` and specifically with :ref:" +"`http-proxy-caching` and :ref:`configuring-the-cache` along with the " +"associated configuration files and values." +msgstr "" +"このドキュメントの目的は、 |TS| キャッシュの基本構造と実装の詳細について記述" +"することです。キャッシュの設定については、内部の仕組みを理解するのに必要な範" +"囲でのみ記述します。このドキュメントは、主に |TS| のコードや |TS| のプラグイ" +"ンに関わる |TS| デベロッパーにとって有用となるでしょう。読者は、 :ref:" +"`admin-guide` と、特に :ref:`http-proxy-caching` と :ref:`configuring-the-" +"cache` 、加えて設定ファイルや設定値に詳しいことを前提とします。" + +#: ../../arch/cache/cache-arch.en.rst:37 +msgid "" +"Unfortunately the internal terminology is not particularly consistent so " +"this document will frequently use terms in different ways than they are " +"used in the code in an attempt to create some consistency." 
+msgstr "" +"不幸なことに、内部用語は一貫していません。そのためこのドキュメントにおいては" +"一貫性を得るためにコード内で使われるものとは違った方法で頻繁に用語を使用しま" +"す。" + +#: ../../arch/cache/cache-arch.en.rst:41 +msgid "Cache Layout" +msgstr "キャッシュレイアウト" + +#: ../../arch/cache/cache-arch.en.rst:48 +msgid "Cache storage" +msgstr "キャッシュストレージ" + +#: ../../arch/cache/cache-arch.en.rst:56 +msgid "Two cache spans" +msgstr "二つのキャッシュスパン" + +#: ../../arch/cache/cache-arch.en.rst:66 +msgid "If the cache volumes for the example cache spans were defined as" +msgstr "" +"もし、例としてキャッシュスパンのキャッシュボリュームが以下のように定義されて" +"いたら" + +#: ../../arch/cache/cache-arch.en.rst:71 +msgid "then the actual layout would look like" +msgstr "実際のレイアウトはこのようになるでしょう。" + +#: ../../arch/cache/cache-arch.en.rst:83 +msgid "" +"The layout and structure of the cache spans, the cache volumes, and the " +"cache stripes that compose them are derived entirely from the :file:" +"`storage.config` and :file:`cache.config` and is recomputed from scratch " +"when the :process:`traffic_server` is started. Therefore any change to " +"those files can (and almost always will) invalidate the existing cache in " +"its entirety." +msgstr "" +"キャッシュスパン、キャッシュボリューム、そしてそれらを構成するキャッシュスト" +"ライプのレイアウトと構造は、全て :file:`storage.config` と:file:`cache." +"config` から取得され、 :process:`traffic_server` が開始された時にはじめから" +"再計算されます。従って、これらのファイルへの任意の変更は(ほとんど常に)それら" +"全ての既存のキャッシュを無効化します。" + +#: ../../arch/cache/cache-arch.en.rst:89 +msgid "Stripe Structure" +msgstr "ストライプ構造" + +#: ../../arch/cache/cache-arch.en.rst:91 +msgid "" +"|TS| treats the storage associated with a cache stripe as an " +"undifferentiated span of bytes. Internally each stripe is treated almost " +"entirely independently. The data structures described in this section are " +"duplicated for each stripe. Internally the term \"volume\" is used for " +"these stripes and implemented primarily in :cpp:class:`Vol`. What a user " +"thinks of as a volume (what this document calls a \"cache volume\") is " +"represented by :cpp:class:`CacheVol`." 
+msgstr "" +"|TS| は、キャッシュストライプと結び付けられたストレージを区別されないバイト" +"列のスパンとして扱います。内部的に各ストライプはほぼ完全に独立して扱われま" +"す。この節で記述されるデータ構造は、各ストライプに複製されます。内部的に " +"\"ボリューム\" という単語はこれらのストライプに使用され、主に:cpp:class:" +"`Vol` で実装されています。ユーザが思うボリューム(このドキュメントでは " +"\"キャッシュボリューム\")は、 :cpp:class:`CacheVol` で表現されます。" + +#: ../../arch/cache/cache-arch.en.rst:98 +msgid "" +"Stripe assignment must be done before working with an object because the " +"directory is local to the stripe. Any cached objects for which the stripe " +"assignment is changed are effectively lost as their directory data will not " +"be found in the new stripe." +msgstr "" +"ディレクトリはストライプに配置されるため、ストライプ割当はオブジェクトを扱う" +"ような動作をする前に行われなければなりません。ストライプ割当が変更された" +"キャッシュオブジェクトは、新しいストライプ上で発見されないであろうディレクト" +"リデータとして事実上消失します。" + +#: ../../arch/cache/cache-arch.en.rst:106 +msgid "Cache Directory" +msgstr "キャッシュディレクトリ" + +#: ../../arch/cache/cache-arch.en.rst:122 +msgid "" +"The directory is used as a memory resident structure which means a " +"directory entry is as small as possible (currently 10 bytes). This forces " +"some compromises on the data that can be stored there. On the other hand " +"this means that most cache misses do not require disk I/O which has a large " +"performance benefit." +msgstr "" +"ディレクトリはメモリに常住する構成で使用されるので、ディレクトリエントリは可" +"能な限り小さくなっています。(現在は 10 バイト)このことは、そこに保存できる" +"データに幾つかの制約を強要します。一方でほとんどのキャッシュミスはディスク " +"I/O を要求せず、大きな性能面の恩恵を得ることになります。" + +#: ../../arch/cache/cache-arch.en.rst:126 +msgid "" +"An additional point is the directory is always fully sized. Once a stripe " +"is initialized the directory size is fixed and never changed. This size is " +"related (roughly linearly) to the size of the stripe. It is for this reason " +"the memory footprint of |TS| depends strongly on the size of the disk " +"cache. Because the directory size does not change, neither does this memory " +"requirement so |TS| does not consume more memory as more content is stored " +"in the cache. 
If there is enough memory to run |TS| with an empty cache " +"there is enough to run it with a full cache." +msgstr "" +"加えて、ディレクトリは常に最大のサイズになります。ストライプが一旦初期化され" +"ると、ディレクトリサイズは固定されて二度と変更されません。このサイズは、スト" +"ライプのサイズに(大雑把に、線形に)関係します。この理由は |TS| のメモリ使用" +"量は、ディスクキャッシュのサイズに強く依存することにあります。ディレクトリサ" +"イズは変わらずメモリ要件もまた変わらないため、 |TS| がキャッシュに保存された" +"コンテンツより多いメモリを消費することはありません。キャッシュが空の状態で |" +"TS| を動作させるのに十分なメモリがあるなら、キャッシュが満ちた状態で動作する" +"のに十分です。" + +#: ../../arch/cache/cache-arch.en.rst:193 +msgid "Storage Layout" +msgstr "ストレージレイアウト" + +#: ../../arch/cache/cache-arch.en.rst:195 +msgid "" +"The storage layout is the stripe metadata followed by cached content. The " +"metadata consists of three parts - the stripe header, the directory, and " +"the stripe footer. The metadata is stored twice. The header and the footer " +"are instances of :cpp:class:`VolHeaderFooter`. This is a stub structure " +"which can have a trailing variable sized array. This array is used as the " +"segment free list roots in the directory. Each contains the segment index " +"of the first element of the free list for the segment. The footer is a copy " +"of the header without the segment free lists. This makes the size of the " +"header dependent on the directory but not that of the footer." +msgstr "" +"ストレージレイアウトは、ストライプメタデータの後ろにキャッシュされたコンテン" +"ツが続きます。メタデータは三つの要素、ストライプヘッダ、ディレクトリ、ストラ" +"イプフッタで構成されます。メタデータは二度保存されます。ヘッダとフッタは :" +"cpp:class:`VolHeaderFooter` のインスタンスです。これは可変長配列を末尾に持つ" +"ことができるスタブ構造体です。この配列はディレクトリのルートのセグメントフ" +"リーリストとして使用されます。各要素はセグメントのフリーリストの、最初の要素" +"のセグメントインデックスを持ちます。フッタは、セグメントフリーリストを伴わな" +"いヘッダのコピーです。ヘッダのサイズはディレクトリに依存しますが、フッタは依" +"存しません。" + +#: ../../arch/cache/cache-arch.en.rst:205 +msgid "Each stripe has several values that describe its basic layout." +msgstr "各ストライプは、基本的なレイアウトを表現する幾つかの値を持ちます。" + +#: ../../arch/cache/cache-arch.en.rst:209 +msgid "skip" +msgstr "skip" + +#: ../../arch/cache/cache-arch.en.rst:208 +msgid "" +"The start of stripe data. 
This represents either space reserved at the " +"start of a physical device to avoid problems with the host operating " +"system, or an offset representing use of space in the cache span by other " +"stripes." +msgstr "" +"ストライプデータの開始地点です。これはホストオペレーティングシステムによる問" +"題を回避するために物理デバイスの始点に予約されたスペース、もしくは他のストラ" +"イプにキャッシュスパンのスペースが使用されていることを表すオフセットのどちら" +"かを表します。" + +#: ../../arch/cache/cache-arch.en.rst:212 +msgid "start" +msgstr "start" + +#: ../../arch/cache/cache-arch.en.rst:212 +msgid "The offset for the start of the content, after the stripe metadata." +msgstr "ストライプメタデータの後からの、コンテンツの開始地点を示すオフセット" + +#: ../../arch/cache/cache-arch.en.rst:215 +msgid "length" +msgstr "length" + +#: ../../arch/cache/cache-arch.en.rst:215 +msgid "Total number of bytes in the stripe. :cpp:member:`Vol::len`." +msgstr "ストライプのバイトの合計値。 :cpp:member:`Vol::len` " + +#: ../../arch/cache/cache-arch.en.rst:218 +msgid "data length" +msgstr "data length" + +#: ../../arch/cache/cache-arch.en.rst:218 +msgid "" +"Total number of blocks in the stripe available for content storage. :cpp:" +"member:`Vol::data_blocks`." +msgstr "" +"コンテンツストレージとして使用可能なストライプのブロックの合計値。:cpp:" +"member:`Vol::data_blocks` " + +#: ../../arch/cache/cache-arch.en.rst:220 +msgid "" +"Great care must be taken with sizes and lengths in the cache code because " +"there are at least three different metrics (bytes, cache blocks, store " +"blocks) used in various places." +msgstr "" +"キャッシュコードの size や length を扱う場合、特に注意しなければなりません。" +"これらは様々な箇所で、少なくとも三つの違うメトリクス(バイト、キャッシュブ" +"ロック、ストアブロック)が使われているからです。" + +#: ../../arch/cache/cache-arch.en.rst:222 +msgid "" +"The total size of the directory (the number of entries) is computed by " +"taking the size of the stripe and dividing by the average object size. The " +"directory always consumes this amount of memory which has the effect that " +"if cache size is increased so is the memory requirement for |TS|. 
The " +"average object size defaults to 8000 bytes but can be configured using :ts:" +"cv:`proxy.config.cache.min_average_object_size`. Increasing the average " +"object size will reduce the memory footprint of the directory at the " +"expense of reducing the number of distinct objects that can be stored in " +"the cache [#]_." +msgstr "" +"ディレクトリの合計サイズ(エントリ数)は、ストライプのサイズを取得して平均オ" +"ブジェクトサイズで割ることで計算されます。もしキャッシュサイズが |TS| のメモ" +"リ要件により増加する場合、ディレクトリは常に効率的なメモリ量を消費します。平" +"均オブジェクトサイズはデフォルトでは 8000 バイトですが、 :ts:cv:`proxy." +"config.cache.min_average_object_size` で設定できます。平均オブジェクトサイズ" +"を増加させることにより、キャッシュに保存するオブジェクト数を減らすことと引き" +"換えに、ディレクトリのメモリ使用量を減らすことができます。 [#]_" + +#: ../../arch/cache/cache-arch.en.rst:232 +msgid "" +"The content area stores the actual objects and is used as a circular buffer " +"where new objects overwrite the least recently cached objects. The location " +"in a stripe where new cache data is written is called the *write cursor*. " +"This means that objects can be de facto evicted from cache even if they " +"have not expired if the data is overwritten by the write cursor. If an " +"object is overwritten this is not detected at that time and the directory " +"is not updated. Instead it will be noted if the object is accessed in the " +"future and the disk read of the fragment fails." +msgstr "" +"コンテンツエリアは実際のオブジェクトを保存し、最もキャッシュされてから時間が" +"経過したオブジェクトを新たなドキュメントで上書きする循環バッファとして使用さ" +"れます。ストライプの新たなキャッシュデータの位置は、 *書込みカーソル* と呼ば" +"れます。これはデータが書込みカーソルによって上書きされる場合、たとえ失効して" +"いなくても、オブジェクトは事実上キャッシュから立ち退かせられることを意味しま" +"す。もしオブジェクトが上書きされる場合、検出されずディレクトリは更新されませ" +"ん。代わりに、もしオブジェクトが将来アクセスされてフラグメントのディスク読込" +"みが失敗する場合、警告されます。" + +#: ../../arch/cache/cache-arch.en.rst:241 +msgid "The write cursor and documents in the cache." +msgstr "書込みカーソルとキャッシュ内のドキュメント" + +#: ../../arch/cache/cache-arch.en.rst:243 +msgid "Cache data on disk is never updated." +msgstr "ディスク上のキャッシュデータは永遠に更新されません。" + +#: ../../arch/cache/cache-arch.en.rst:245 +msgid "" +"This is a key thing to keep in mind. 
What appear to be updates (such as " +"doing a refresh on stale content and getting back a 304) are actually new " +"copies of data being written at the write cursor. The originals are left as " +"\"dead\" space which will be consumed when the write cursor arrives at that " +"disk location. Once the stripe directory is updated (in memory!) the " +"original fragment in the cache is effectively destroyed. This is the " +"general space management techinque used in other cases as well. If an " +"object needs to removed from cache, only the directory needs to be changed. " +"No other work (and *particularly* no disk I/O) needs to be done." +msgstr "" +"これは、心に留めておくべき重要な事です。更新されるように見えるもの(新鮮では" +"なくなったコンテンツをリフレッシュし、 304 を返すような)は、実際には書込み" +"カーソルで書き込まれているデータの新しいコピーです。オリジナルは書込みカーソ" +"ルがディスクのその位置に到着する時に消去される、\"死んだ\" スペースとして残" +"されます。一旦ストライプディレクトリが(メモリ内で!)更新されると、キャッ" +"シュ上のオリジナルのフラグメントは事実上破棄されます。これは他のケースでも同" +"様に用いられる、一般的なスペース管理技術です。もしオブジェクトをキャッシュか" +"ら削除する必要がある場合、ディレクトリの変更のみ必要があります。他の動作( *" +"特に* ディスク I/O も)を行う必要はありません。" + +#: ../../arch/cache/cache-arch.en.rst:253 +msgid "Object Structure" +msgstr "オブジェクトの構造" + +#: ../../arch/cache/cache-arch.en.rst:255 +msgid "" +"Objects are stored as two types of data, metadata and content data. " +"Metadata is all the data about the object and the content and includes the " +"HTTP headers. The content data is the content of the object, the octet " +"stream delivered to the client as the object." +msgstr "" +"オブジェクトは二つのデータのタイプ、メタデータとコンテンツデータとして保存" +"されます。メタデータは、 HTTP ヘッダを含むオブジェクトとコンテンツに関する" +"全てのデータです。コンテンツデータはオブジェクトのコンテンツで、オブジェクトと" +"してクライアントに配信されるオクテットストリームです。" + +#: ../../arch/cache/cache-arch.en.rst:259 +msgid "" +"Objects are rooted in a :cpp:class:`Doc` structure stored in the cache. :" +"cpp:class:`Doc` serves as the header data for a fragment and is contained " +"at the start of every fragment. The first fragment for an object is termed " +"the \"first ``Doc``\" and always contains the object metadata. 
Any " +"operation on the object will read this fragment first. The fragment is " +"located by converting the cache key for the object to a cache ID and then " +"doing a lookup for a directory entry with that key. The directory entry has " +"the offset and approximate size of the first fragment which is then read " +"from the disk. This fragment will contain the request header and response " +"along with overall object properties (such as content length)." +msgstr "" +"オブジェクトは、キャッシュに格納される :cpp:class:`Doc` 構造体をルートと" +"します。:cpp:class:`Doc` はフラグメントのヘッダデータとして提供し、各フラグ" +"メントの先頭に置かれます。これは \" first ``Doc``\" と呼ばれ、常にオブジェクト" +"メタデータに含まれます。オブジェクトの任意の操作は、最初にこのフラグメントを" +"読み込みます。そのフラグメントはオブジェクトのキャッシュキーをキャッシュ ID に" +"変換することにより発見され、その後そのキーを使ってディレクトリエントリが検索" +"されます。ディレクトリエントリは、その時ディスクから読み込まれる先頭の" +"フラグメントのオフセットとおおよそのサイズを持ちます。このフラグメントは、全て" +"のオブジェクトのプロパティ(コンテンツ長のような)に加えて、リクエストヘッダ" +"とレスポンスを持ちます。" + +#: ../../arch/cache/cache-arch.en.rst:269 +msgid "" +"|TS| supports `varying content `_ for objects. These are called *alternates*. All " +"metadata for all alternates is stored in the first fragment including the " +"set of alternates and the HTTP headers for them. This enables `alternate " +"selection `_ to be done after the first " +"``Doc`` is read from disk. An object that has more than one alternate will " +"have the alternate content stored separately from the first fragment. For " +"objects with only one alternate the content may or may not be in the same " +"(first) fragment as the metadata. Each separate alternate content is " +"allocated a directory entry and the key for that entry is stored in the " +"first fragment metadata." 
+msgstr "" +"|TS| はオブジェクト用に `コンテンツ変換 `_ をサポートします。これらを *代替* と呼びます。" +"全ての代替の全メタデータは、代替のセットとそれらの HTTP ヘッダを含む先頭の" +"フラグメントに格納されます。これは、 first ``Doc``がディスクから読込まれた" +"後に、`代替セクション `_ が実行されて有効に" +"なります。複数の代替を持つオブジェクトは、先頭のフラグメントとは別に" +"保存された代替コンテンツを持ちます。一つの代替のみ持つオブジェクトは、" +"コンテンツはメタデータと同じ(先頭の)フラグメントに置かれるかもしれないし、" +"そうでもないかもしれません。個々の分離された代替コンテンツはディレクトリ" +"エントリを割り当てられ、エントリのキーは先頭のフラグメントのメタデータに" +"保存されます。" + +#: ../../arch/cache/cache-arch.en.rst:278 +msgid "" +"Prior to version 4.0.1 the header data was stored in the :cpp:class:" +"`CacheHTTPInfoVector` class which was marshaled to a variable length area " +"of the on disk image, followed by information about additional fragments if " +"needed to store the object." +msgstr "" +"バージョン 4.0.1 以前は、ヘッダデータはディスクイメージの可変長領域をまとめる " +":cpp:class:`CacheHTTPInfoVector` クラス内に保存され、オブジェクトを保存する" +"必要がある場合、追加フラグメント情報がその後ろに続きます。" + +#: ../../arch/cache/cache-arch.en.rst:285 +msgid "``Doc`` layout 3.2.0" +msgstr "3.2.0 の ``Doc`` レイアウト" + +#: ../../arch/cache/cache-arch.en.rst:287 +msgid "" +"This had the problem that with only one fragment table it could not be " +"reliable for objects with more than one alternate [#]_. Therefore the " +"fragment data was moved from being a separate variable length section of " +"the metadata to being directly incorporated in to the :cpp:class:" +"`CacheHTTPInfoVector`, yielding a layout of the following form." +msgstr "" +"この方法では、一つのフラグメントテーブルのみでは複数の代替を持つオブジェクトに" +"対しては信頼性を確保できない問題がありました。 [#]_ 従って、フラグメントデータ" +"はメタデータ用に分離された可変長領域から :cpp:class:`CacheHTTPInfoVector` へ" +"直接統合されるよう移動し、以下の形式のレイアウトに生まれ変わりました。" + +#: ../../arch/cache/cache-arch.en.rst:294 +msgid "``Doc`` layout 4.0.1" +msgstr "4.0.1 の ``Doc`` レイアウト" + +#: ../../arch/cache/cache-arch.en.rst:296 +msgid "" +"Each element in the vector contains for each alternate, in addition to the " +"HTTP headers and the fragment table (if any), a cache key. This cache key " +"identifies a directory entry that is referred to as the \"earliest ``Doc``" +"\". 
This is the location where the content for the alternate begins." +msgstr "" +"ベクターの各要素は、各代替に加えて(もしあれば) HTTP ヘッダとフラグメント" +"テーブル、キャッシュキーを含みます。このキャッシュキーは \"earliest " +"``Doc`` \" として参照されるディレクトリエントリを特定します。これは代替の" +"コンテンツの開始位置です。" + +#: ../../arch/cache/cache-arch.en.rst:300 +msgid "" +"When the object is first cached, it will have a single alternate and that " +"will be stored (if not too large) in first ``Doc``. This is termed a " +"*resident alternate* in the code. This can only happen on the initial store " +"of the object. If the metadata is updated (such as a ``304`` response to an " +"``If-Modified-Since`` request) then unless the object is small, the object " +"data will be left in the original fragment and a new fragment written as " +"the first fragment, making the alternate non-resident. \"Small\" is defined " +"as a length smaller than :ts:cv:`proxy.config.cache.alt_rewrite_max_size`." +msgstr "" +"オブジェクトが最初にキャッシュされる際、それは単一の代替を持ち、(大き" +"すぎない場合は) first ``Doc`` に格納されます。これはコード中で *常駐代替* と" +"呼ばれます。この処理はオブジェクトの最初の保存時にのみ発生できます。メタ" +"データが更新される場合(``If-Modified-Since`` リクエストに対し、 ``304`` を" +"返すなど)、オブジェクトが小さくなければ、オブジェクトデータはオリジナルの" +"フラグメントに取り残され、新しいフラグメントが先頭のフラグメントとして書き" +"込まれ、非常駐の代替を作成します。 :ts:cv:`proxy.config.cache." +"alt_rewrite_max_size` より小さい長さを、 \"小さい\" と定義します。" + +#: ../../arch/cache/cache-arch.en.rst:308 +msgid "" +"The :cpp:class:`CacheHTTPInfoVector` is stored only in the first ``Doc``. " +"Subsequent ``Doc`` instances for the object, including the earliest " +"``Doc``, should have an ``hlen`` of zero and if not, it is ignored." +msgstr "" +":cpp:class:`CacheHTTPInfoVector` は first ``Doc`` にのみ保存されます。" +"earliest ``Doc`` を含む、 first ``Doc`` に続くオブジェクトの ``Doc`` インス" +"タンスはゼロの ``hlen`` を持つはずであり、もしそうでないものは無視されます。" + +#: ../../arch/cache/cache-arch.en.rst:311 +msgid "" +"Large objects are split in to multiple fragments when written to the cache. 
" +"This is indicated by a total document length that is longer than the " +"content in first ``Doc`` or an earliest ``Doc``. In such a case a fragment " +"offset table is stored. This contains the byte offset in the object content " +"of the first byte of content data for each fragment past the first (as the " +"offset for the first is always zero). This allows range requests to be " +"serviced much more efficiently for large objects, as intermediate fragments " +"that do not contain data in the range can be skipped. The last fragment in " +"the sequence is detected by the fragment size and offset reaching the end " +"of the total size of the object, there is no explicit end mark. Each " +"fragment is computationally chained from the previous in that the cache key " +"for fragment N is computed by::" +msgstr "" +"巨大なオブジェクトは、キャッシュに書き込まれる時に複数のフラグメントに分割され" +"ます。これは、ドキュメント長の合計が first ``Doc`` もしくは earliest ``Doc`` " +"のコンテンツより長いことを示します。このような場合には、フラグメントオフセット" +"テーブルが保存されます。このテーブルは(オフセットが常にゼロであるような)" +"先頭のフラグメントの次以降の各フラグメントのコンテンツデータの先頭バイト" +"を指し示す、オブジェクトコンテンツ全体の中でのバイトオフセットを含みます。" +"これは範囲内のデータを含まない、中間にあるフラグメントをスキップ可能にする" +"ことで、巨大なオブジェクトを非常に効率的に提供するためのレンジリクエストを" +"可能にします。シーケンスの最後のフラグメントは、明示的なエンドマーク無しに" +"フラグメントサイズとオフセットがオブジェクトの合計サイズの最後に到達することに" +"より検出されます。各フラグメントは、計算的に前のものと繋がっています。" +"フラグメント N のキャッシュキーは、以下により計算されます::" + +#: ../../arch/cache/cache-arch.en.rst:322 +msgid "" +"where ``next_key`` is a global function that deterministically computes a " +"new cache key from an existing cache key." +msgstr "" +"``next_key`` は既存のキャッシュキーから新しいキャッシュキーを決定論的に計算" +"するグローバル関数です。" + +#: ../../arch/cache/cache-arch.en.rst:324 +msgid "" +"Objects with multiple fragments are laid out such that the data fragments " +"(including the earliest ``Doc``) are written first and the first ``Doc`` is " +"written last. 
When read from disk, both the first and earliest ``Doc`` are " +"validated (tested to ensure that they haven't been overwritten by the write " +"cursor) to verify that the entire document is present on disk (as they " +"bookend the other fragments - the write cursor cannot overwrite them " +"without overwriting at leastone of the verified ``Doc`` instances). Note " +"that while the fragments of a single object are ordered they are not " +"necessarily contiguous as data from different objects are interleaved as " +"the data arrives in |TS|." +msgstr "" +"複数のフラグメントを伴うオブジェクトは、(earliest ``Doc`` を含む)データ" +"フラグメントが先頭に書き込まれ、 first ``Doc`` が末尾に書き込まれる形で" +"レイアウトされます。ディスクから読み込まれる時、先頭と earliest ``Doc`` の" +"両方は、ドキュメント全部がディスクに存在することを確認するため検証されます。" +"(それらが書込みカーソルにより上書きされていないことを確認することで試験" +"されます) (これらの ``Doc`` は他のフラグメントのブックエンドとなります。" +"書込みカーソルは、検証された ``Doc`` インスタンスの少なくとも一つの上書き" +"無しに、ドキュメントを上書き出来ません)単一のオブジェクトのフラグメントは、 " +"|TS| に届いたデータとして異なるオブジェクトのデータが挟み込められてしまう" +"ため、必然的に隣接しないように並べられていることに注意してください。" + +#: ../../arch/cache/cache-arch.en.rst:334 +msgid "Multi-alternate and multi-fragment object storage" +msgstr "オブジェクトストレージの、複数の代替と複数のフラグメント" + +#: ../../arch/cache/cache-arch.en.rst:338 +msgid "" +"Documents which are \"pinned\" into the cache must not be overwritten so " +"they are \"evacuated\" from in front of the write cursor. Each fragment is " +"read and rewritten. There is a special lookup mechanism for objects that " +"are being evacuated so that they can be found in memory rather than the " +"potentially unreliable disk regions. The cache scans ahead of the write " +"cursor to discover pinned objects as there is a dead zone immediately " +"before the write cursor from which data cannot be evacuated. Evacuated data " +"is read from disk and placed in the write queue and written as its turn " +"comes up." 
+msgstr "" +"キャッシュへ \"ピン留め\" されたドキュメントは上書きされてはならず、そのため" +"それらは書込みカーソルの前に \"退避\" させられます。各フラグメントは読み込ま" +"れ、再書込みされます。潜在的に信頼性の低いディスク領域ではなくメモリ上で検出" +"できるよう、退避されるオブジェクトの特殊な検出メカニズムが存在します。データを" +"退避させることができない書込みカーソル直前のデットゾーンがあるため、ピン留め" +"されたオブジェクトを発見するためにキャッシュは書込みカーソルより先にスキャン" +"します。退避されたデータはディスクから読み込まれて書込みキューに置かれ、出番が" +"来ると書き込まれます。" + +#: ../../arch/cache/cache-arch.en.rst:344 +msgid "" +"It appears that objects can only be pinned via the :file:`cache.config` " +"file and if the :ts:cv:`proxy.config.cache.permit.pinning` is set to non-" +"zero (it is zero by default). Objects which are in use when the write " +"cursor is near use the same underlying evacuation mechanism but are handled " +"automatically and not via the explicit ``pinned`` bit in :cpp:class:`Dir`." +msgstr "" +"オブジェクトは :file:`cache.config` ファイルと、 :ts:cv:`proxy.config.cache." +"permit.pinning` にゼロでない値(デフォルトではゼロ)を設定した場合のみピン留め" +"できます。書込みカーソルが接近している際に使用中のオブジェクトは、基本的な退避" +"メカニズムを同様に使用しますが、 :cpp:class:`Dir` の明示的な ``ピン留め`` " +"無しに自動的に扱われます。" + +#: ../../arch/cache/cache-arch.en.rst:349 +msgid "" +"It could, under certain circumstances, be accurate for none of the " +"alternates." +msgstr "それは、ある状況下では代替が正確になりません。" + +#: ../../arch/cache/cache-arch.en.rst:352 +msgid "Additional Notes" +msgstr "追加情報" + +#: ../../arch/cache/cache-arch.en.rst:354 +msgid "Some general observations on the data structures." +msgstr "データ構造のいくつかの概説。" + +#: ../../arch/cache/cache-arch.en.rst:357 +msgid "Cyclone buffer" +msgstr "循環バッファ" + +#: ../../arch/cache/cache-arch.en.rst:359 +msgid "" +"Because the cache is a cyclone cache objects are not preserved for an " +"indefinite time. Even if the object is not stale it can be overwritten as " +"the cache cycles through its volume. Marking an object as ``pinned`` " +"preserves the object through the passage of the write cursor but this is " +"done by copying the object across the gap, in effect re-storing it in the " +"cache. 
Pinning large objects or a large number objects can lead to a " +"excessive disk activity. The original purpose of pinning seems to have been " +"for small, frequently used objects explicitly marked by the administrator." +msgstr "" +"キャッシュは循環するため、キャッシュオブジェクトは無期限の保存はされません。" +"たとえオブジェクトが新鮮であっても、ボリュームのキャッシュサイクルとして上書き" +"する可能性があります。 ``ピン留め`` してオブジェクトをマーキングすることにより" +"書込みカーソルの通過をやり過ごしてオブジェクトを保護することができますが、これ" +"は書込みカーソルを跨いでオブジェクトをコピーすること、実際にはキャッシュ内に" +"再保存を行うことによって処理されます。巨大なオブジェクトや大量のオブジェクトの" +"ピン留めは、過度のディスク動作を引き起こす場合があります。ピン留めの本来の" +"目的は、管理者によって明示的にマークされた小さく頻繁に使用されるオブジェクトの" +"ためのものであったようです。" + +#: ../../arch/cache/cache-arch.en.rst:365 +msgid "" +"This means the purpose of expiration data on objects is simply to prevent " +"them from being served to clients. They are not in the standard sense " +"deleted or cleaned up. The space can't be immediately reclaimed in any " +"event because writing only happens at the write cursor. Deleting an object " +"consists only of removing the directory entries in the volume directory " +"which suffices to (eventually) free the space and render the document " +"inaccessible." +msgstr "" +"循環バッファの目的は、単純にオブジェクトの失効データがクライアントに提供される" +"のを防ぐことです。失効データは一般的な意味での削除やクリーンアップはされま" +"せん。書込み処理は書込みカーソルでのみ発生するので、どんなイベントにおいても" +"スペースは直ちには取り戻せません。オブジェクトの削除は(最終的には)スペースを" +"解放してかつドキュメントをアクセス不可能にするのに十分な処理である、ボリューム" +"ディレクトリのディレクトリエントリの削除のみで成り立ちます。" + +#: ../../arch/cache/cache-arch.en.rst:370 +msgid "" +"Historically the cache is designed this way because web content was " +"relatively small and not particularly consistent. The design also provides " +"high performance and low consistency requirements. There are no " +"fragmentation issues for the storage, and both cache misses and object " +"deletions require no disk I/O. It does not deal particularly well with long " +"term storage of large objects. See the :ref:`volume tagging` appendix for " +"details on some work in this area." 
+msgstr "" +"ウェブコンテンツは比較的小さく特に一貫もしていなかったので、歴史的に" +"キャッシュはこの方法の通りに設計されています。この設計は高性能でかつ低い" +"一貫性の要件となります。ストレージのフラグメンテーション問題は発生せず、また" +"キャッシュミスやオブジェクトの削除がディスク I/O を要求することもありません。" +"巨大なオブジェクトの長期間保存はうまく扱いません。この部分の動作の詳細について" +"は :ref:`volume tagging` の付録を見てください。" + +#: ../../arch/cache/cache-arch.en.rst:376 +msgid "Disk Failure" +msgstr "ディスク障害" + +#: ../../arch/cache/cache-arch.en.rst:378 +msgid "" +"The cache is designed to be relatively resistant to disk failures. Because " +"each storage unit in each volume is mostly independent the loss of a disk " +"simply means that the corresponding :cpp:class:`Vol` instances (one per " +"cache volume that uses the storage unit) becomes unusable. The primary " +"issue is updating the volume assignment table to both preserve assignments " +"for objects on still operational volumes while distributing the assignments " +"from the failed disk to those operational volumes. This mostly done in::" +msgstr "" +"キャッシュはディスク障害に比較的強いように設計されます。各ボリュームの" +"各ストレージユニットはほぼ独立しているので、ディスクの損失は対応する " +":cpp:class:`Vol` インスタンス(ストレージユニットを使うキャッシュボリューム" +"毎のもの)が使えなくなることを単純に意味します。主な課題は故障したディスクから" +"運用中のボリュームへの割当の配布中に、まだ運用中のボリューム上のオブジェクトの" +"割当を両方保存するためのボリューム割当テーブルの更新処理です。これはほとんどが" +"以下の中で処理されます::" + +#: ../../arch/cache/cache-arch.en.rst:382 +msgid "" +"Restoring a disk to active duty is quite a bit more difficult task. " +"Changing the volume assignment of a cache key renders any currently cached " +"data inaccessible. This is obviouly not a problem when a disk has failed, " +"but is a bit trickier to decide which cached objects are to be de facto " +"evicted if a new storage unit is added to a running system. The mechanism " +"for this, if any, is still under investigation." 
+msgstr "" +"ディスクを稼働状態に戻すのは非常に困難な作業です。キャッシュキーの" +"ボリューム割当の変更は、全ての現在キャッシュされているデータへのアクセスを" +"不可能にします。ディスクが故障した際にこれは当然ながら問題にはなりませんが、" +"新しいストレージユニットが動作中のシステムに追加された場合、どのキャッシュ" +"されたオブジェクトが事実上追い出されるか決定するのが少々扱いにくい処理です。" +"このためのメカニズムは何かないか、まだ調査です。" + +#: ../../arch/cache/cache-arch.en.rst:385 +msgid "Implementation Details" +msgstr "実装の詳細" + +#: ../../arch/cache/cache-arch.en.rst:388 +msgid "Stripe Directory" +msgstr "ストライプディレクトリ" + +#: ../../arch/cache/cache-arch.en.rst:392 +msgid "The in memory volume directory entries are defined as described below." +msgstr "メモリ上のボリュームディレクトリエントリは、以下の通りに定義されます。" + +#: ../../arch/cache/cache-arch.en.rst:396 +msgid "Defined in |P-CacheDir.h|_." +msgstr "|P-CacheDir.h|_ で定義される。" + +#: ../../arch/cache/cache-arch.en.rst:399 +msgid "Name" +msgstr "名前" + +#: ../../arch/cache/cache-arch.en.rst:399 +msgid "Type" +msgstr "型" + +#: ../../arch/cache/cache-arch.en.rst:399 +msgid "Use" +msgstr "用途" + +#: ../../arch/cache/cache-arch.en.rst:401 +msgid "offset" +msgstr "offset" + +#: ../../arch/cache/cache-arch.en.rst:401 +msgid "unsigned int:24" +msgstr "unsigned int:24" + +#: ../../arch/cache/cache-arch.en.rst:401 +msgid "Offset of first byte of metadata (volume relative)" +msgstr "(ボリュームに関連した)メタデータの先頭バイトのオフセット" + +#: ../../arch/cache/cache-arch.en.rst:402 +msgid "big" +msgstr "big" + +#: ../../arch/cache/cache-arch.en.rst:402 +msgid "unsigned in:2" +msgstr "unsigned in:2" + +#: ../../arch/cache/cache-arch.en.rst:402 +msgid "Size multiplier" +msgstr "サイズの乗数" + +#: ../../arch/cache/cache-arch.en.rst:403 +msgid "size" +msgstr "size" + +#: ../../arch/cache/cache-arch.en.rst:403 +msgid "unsigned int:6" +msgstr "unsigned int:6" + +#: ../../arch/cache/cache-arch.en.rst:403 +msgid "Size" +msgstr "サイズ" + +#: ../../arch/cache/cache-arch.en.rst:404 +msgid "tag" +msgstr "tag" + +#: ../../arch/cache/cache-arch.en.rst:404 +msgid "unsigned int:12" +msgstr "unsigned int:12" + +#: ../../arch/cache/cache-arch.en.rst:404 +msgid "Partial key (fast collision check)" 
+msgstr "(高速な衝突チェックの為の)キーの一部" + +#: ../../arch/cache/cache-arch.en.rst:405 +msgid "phase" +msgstr "phase" + +#: ../../arch/cache/cache-arch.en.rst:405 +#: ../../arch/cache/cache-arch.en.rst:406 +#: ../../arch/cache/cache-arch.en.rst:407 +#: ../../arch/cache/cache-arch.en.rst:408 +msgid "unsigned int:1" +msgstr "unsigned int:1" + +#: ../../arch/cache/cache-arch.en.rst:405 +msgid "Unknown" +msgstr "不明" + +#: ../../arch/cache/cache-arch.en.rst:406 +msgid "head" +msgstr "head" + +#: ../../arch/cache/cache-arch.en.rst:406 +msgid "Flag: first fragment in an object" +msgstr "オブジェクトの先頭のフラグメントを示すフラグ" + +#: ../../arch/cache/cache-arch.en.rst:407 +#: ../../arch/cache/cache-arch.en.rst:336 +msgid "pinned" +msgstr "pinned" + +#: ../../arch/cache/cache-arch.en.rst:407 +msgid "Flag: document is pinned" +msgstr "ドキュメントがピン留めされていることを示すフラグ" + +#: ../../arch/cache/cache-arch.en.rst:408 +msgid "token" +msgstr "token" + +#: ../../arch/cache/cache-arch.en.rst:408 +msgid "Flag: Unknown" +msgstr "不明なフラグ" + +#: ../../arch/cache/cache-arch.en.rst:409 +msgid "next" +msgstr "next" + +#: ../../arch/cache/cache-arch.en.rst:409 +msgid "unsigned int:16" +msgstr "unsigned int:16" + +#: ../../arch/cache/cache-arch.en.rst:409 +msgid "Segment local index of next entry." +msgstr "次エントリへのセグメントローカルインデックス" + +#: ../../arch/cache/cache-arch.en.rst:410 +msgid "offset_high" +msgstr "offset_high" + +#: ../../arch/cache/cache-arch.en.rst:410 +msgid "inku16" +msgstr "inku16" + +#: ../../arch/cache/cache-arch.en.rst:410 +msgid "High order offset bits" +msgstr "上位オフセットビット" + +#: ../../arch/cache/cache-arch.en.rst:413 +msgid "" +"The stripe directory is an array of ``Dir`` instances. Each entry refers to " +"a span in the volume which contains a cached object. Because every object " +"in the cache has at least one directory entry this data has been made as " +"small as possible." 
+msgstr "" +"ストライプディレクトリは ``Dir`` インスタンスの配列です。各エントリは" +"キャッシュされたオブジェクトを持つボリュームのスパンを参照します。キャッシュ内" +"の各オブジェクトは少なくとも一つのディレクトリエントリを持つため、このデータは" +"可能な限り小さくなるよう設計されました。" + +#: ../../arch/cache/cache-arch.en.rst:415 +msgid "" +"The offset value is the starting byte of the object in the volume. It is 40 " +"bits long split between the *offset* (lower 24 bits) and *offset_high* " +"(upper 16 bits) members. Note that since there is a directory for every " +"storage unit in a cache volume, this is the offset in to the slice of a " +"storage unit attached to that volume." +msgstr "" +"オフセット値はボリューム内におけるオブジェクトの開始バイトです。それは " +"*offset* (下位 24 ビット) と *offset_high* (上位 16 ビット) メンバに渡って" +"分割された 40 ビット長です。キャッシュボリュームの各ストレージユニット毎に" +"ディレクトリは存在するため、この値はボリュームに接続されたストレージユニットの" +"一部分へのオフセットであることに注意してください。" + +#: ../../arch/cache/cache-arch.en.rst:419 +msgid "" +"The *size* and *big* values are used to calculate the approximate size of " +"the span which contains the object. This value is used as the number of " +"bytes to read from storage at the offset value. The exact size is contained " +"in the object metadata in :cpp:class:`Doc` which is consulted once the read " +"has completed. For this reason the approximate size needs to be at least as " +"large as the actual size but can be larger, at the cost of reading the " +"extraneous bytes." +msgstr "" +"*size* と *big* 値はオブジェクトを持つスパンの大まかなサイズを計算する為に" +"使用されます。この値はストレージのオフセット値から読込み処理を行うための" +"バイト数として使用されます。正確なサイズは一旦読込みが完了した後に、参照される" +" :cpp:class:`Doc` のオブジェクトメタデータに含まれます。この理由は、大まかな" +"サイズは少なくとも実サイズと同じかそれより大きくする必要があるものの、" +"無関係なバイト列の読込みをするためコストが大きくなることにあります。" + +#: ../../arch/cache/cache-arch.en.rst:421 +msgid "" +"The computation of the approximate size of the fragment is defined as::" +msgstr "" +"フラグメントの大まかなサイズの計算は以下のように定義されます。::" + +#: ../../arch/cache/cache-arch.en.rst:425 +msgid "" +"where ``CACHE_BLOCK_SHIFT`` is the bit width of the size of a basic cache " +"block (9, corresponding to a sector size of 512). 
Therefore the value with " +"current defines is::" +msgstr "" +"``CACHE_BLOCK_SHIFT`` は基本的なキャッシュブロックのサイズのビット幅(9 で" +"あり、 512 のセクタサイズに対応している) です。従ってその値は現在は以下の" +"ように定義されます。::" + +#: ../../arch/cache/cache-arch.en.rst:429 +msgid "Because *big* is 2 bits the values for the multiplier of *size* are" +msgstr "*big* は 2 ビットであるため、*size* の乗数の値は、" + +#: ../../arch/cache/cache-arch.en.rst:434 +msgid "*big*" +msgstr "*big*" + +#: ../../arch/cache/cache-arch.en.rst:434 +msgid "Multiplier" +msgstr "乗数" + +#: ../../arch/cache/cache-arch.en.rst:434 +msgid "Maximum Size" +msgstr "最大サイズ" + +#: ../../arch/cache/cache-arch.en.rst:436 +msgid "0" +msgstr "0" + +#: ../../arch/cache/cache-arch.en.rst:436 +msgid "512 (2^9)" +msgstr "512 (2^9)" + +#: ../../arch/cache/cache-arch.en.rst:436 +#: ../../arch/cache/cache-arch.en.rst:438 +msgid "32768 (2^15)" +msgstr "32768 (2^15)" + +#: ../../arch/cache/cache-arch.en.rst:437 +msgid "1" +msgstr "1" + +#: ../../arch/cache/cache-arch.en.rst:437 +msgid "4096 (2^12)" +msgstr "4096 (2^12)" + +#: ../../arch/cache/cache-arch.en.rst:437 +#: ../../arch/cache/cache-arch.en.rst:439 +msgid "262144 (2^18)" +msgstr "262144 (2^18)" + +#: ../../arch/cache/cache-arch.en.rst:438 +msgid "2" +msgstr "2" + +#: ../../arch/cache/cache-arch.en.rst:438 +msgid "2097152 (2^21)" +msgstr "2097152 (2^21)" + +#: ../../arch/cache/cache-arch.en.rst:439 +msgid "3" +msgstr "3" + +#: ../../arch/cache/cache-arch.en.rst:439 +msgid "16777216 (2^24)" +msgstr "16777216 (2^24)" + +#: ../../arch/cache/cache-arch.en.rst:442 +msgid "" +"Note also that *size* is effectively offset by one, so a value of 0 " +"indicates a single unit of the multiplier." 
+msgstr "" +"*size* も 1 が効率的なオフセットであり、そのため 0 は乗数の単一のユニットを" +"示すことに注意してください。" + +#: ../../arch/cache/cache-arch.en.rst:446 +msgid "The target fragment size can set with the :file:`records.config` value" +msgstr "" +"ターゲットフラグメントサイズは :file:`records.config` の値で設定できます。" + +#: ../../arch/cache/cache-arch.en.rst:448 +msgid "``proxy.config.cache.target_fragment_size``" +msgstr "``proxy.config.cache.target_fragment_size``" + +#: ../../arch/cache/cache-arch.en.rst:450 +msgid "" +"This value should be chosen so that it is a multiple of a :ref:`cache entry " +"multiplier `. It is not necessary to make it a power of 2 [#]_. " +"Larger fragments increase I/O efficiency but lead to more wasted space. The " +"default size (1M, 2^20) is a reasonable choice in most circumstances " +"altough in very specific cases there can be benefit from tuning this " +"parameter. |TS| imposes an internal maximum of a 4194232 bytes which is 4M " +"(2^22) less the size of a struct :cpp:class:`Doc`. In practice then the " +"largest reasonable target fragment size is 4M - 262144 = 3932160." +msgstr "" +":ref:`キャッシュエントリ乗数 ` の倍数になるようにこの値を選択する" +"べきです。 2 の冪乗にする必要はありません。 [#]_ 巨大なフラグメントは I/O の" +"効率を向上しますが、無駄なスペースが増加します。デフォルトサイズ(1M, " +"2^20)はこのパラメータを合わせることにより恩恵が得られる、非常に特殊なケースを" +"除いてほとんどの環境において合理的な選択です。 |TS| は :cpp:class:`Doc` " +"構造のサイズより少ない 4M(2^22) である 4194232 バイトの内部的な最大値を" +"強制します。事実上、最大の合理的なターゲットフラグメントサイズは 4M - 262144 " +"= 3932160 です。" + +#: ../../arch/cache/cache-arch.en.rst:456 +msgid "" +"When a fragment is stored to disk the size data in the cache index entry is " +"set to the finest granularity permitted by the size of the fragment. To " +"determine this consult the :ref:`cache entry multipler ` table, " +"find the smallest maximum size that is at least as large as the fragment. " +"That will indicate the value of *big* selected and therefore the " +"granularity of the approximate size. That represents the largest possible " +"amount of wasted disk I/O when the fragment is read from disk." 
+msgstr "" +"フラグメントがディスクへ保存される際、キャッシュインデックスエントリのサイズ" +"データはフラグメントのサイズにより許容される最小粒度で設定されます。これが " +":ref:`キャッシュエントリ乗数 ` テーブルを参照するかを決定する" +"ため、少なくともフラグメントと同じくらい大きな、最も小さな最大サイズを検索" +"します。それは選択された *big* の値であり、従って大まかなサイズの粒度を示すで" +"しょう。それはフラグメントがディスクから読み込まれる際に発生しうる最大の余分な" +"ディスク I/O の量を表します。" + +#: ../../arch/cache/cache-arch.en.rst:464 +msgid "" +"The set of index entries for a volume are grouped in to *segments*. The " +"number of segments for an index is selected so that there are as few " +"segments as possible such that no segment has more than 2^16 entries. Intra-" +"segment references can therefore use a 16 bit value to refer to any other " +"entry in the segment." +msgstr "" +"ボリュームのインデックスエントリのセットは *セグメント* へグループ化されます。" +"インデックスのセグメントの数は、 2^16 エントリを超えるセグメント数にならない" +"ように可能な限り少ないセグメントになるよう選択されます。内部セグメントの参照は" +"そのため、セグメントの任意の他のエントリを参照する為に 16 ビット値を使用" +"します。" + +#: ../../arch/cache/cache-arch.en.rst:468 +msgid "" +"Index entries in a segment are grouped *buckets* each of ``DIR_DEPTH`` " +"(currently 4) entries. These are handled in the standard hash table way, " +"giving somewhat less than 2^14 buckets per segment." +msgstr "" +"セグメントのインデックスエントリは ``DIR_DEPTH`` (現在は 4)エントリ毎に " +"*バケット* へグループ化されます。これらはセグメント毎に 2^14 未満のバケットを" +"割り当てる標準的なハッシュテーブルの手法で処理されます。" + +#: ../../arch/cache/cache-arch.en.rst:471 +msgid "The comment in :file:`records.config` is simply wrong." +msgstr ":file:`records.config` のコメントは単純に間違っています。" + +#: ../../arch/cache/cache-arch.en.rst:476 +msgid "Directory Probing" +msgstr "ディレクトリ検索" + +#: ../../arch/cache/cache-arch.en.rst:478 +msgid "" +"Directory probing is locating a specific directory entry in the stripe " +"directory based on a cache ID. This is handled primarily by the function :" +"cpp:func:`dir_probe()`. This is passed the cache ID (:arg:`key`), a stripe " +"(:arg:`d`), and a last collision (:arg:`last_collision`). The last of these " +"is an in and out parameter, updated as useful during the probe." 
+msgstr "" +"ディレクトリ検索はキャッシュ ID に基づくストライプディレクトリの特定の" +"ディレクトリエントリを検索します。これは主に :cpp:func:`dir_probe()` 関数に" +"よって処理されます。この関数はキャッシュ ID(:arg:`key`) 、ストライプ(" +":arg:`d`)、そして最後の衝突(:arg:`last_collision`)が渡されます。最後の衝突" +"はパラメータを読み書きして検索に役立つよう更新されます。" + +#: ../../arch/cache/cache-arch.en.rst:483 +msgid "" +"Given an ID, the top half (64 bits) is used as a :ref:`segment ` index, taken modulo the number of segments in the directory. The " +"bottom half is used as a :ref:`bucket ` index, taken modulo the " +"number of buckets per segment. The :arg:`last_collision` value is used to " +"mark the last matching entry returned by `dir_probe`." +msgstr "" +"与えられた ID の上位半分(64 ビット)は :ref:`セグメント ` " +"インデックスとして使用され、ディレクトリのセグメント数の剰余として扱われます。" +"下位半分は :ref:`バケット ` インデックスとして使用され、" +"セグメント毎のバケット数の剰余として扱われます。:arg:`last_collision` の" +"値は `dir_probe` の戻り値となる最後にマッチするエントリをマークするために使用" +"されます。" + +#: ../../arch/cache/cache-arch.en.rst:487 +msgid "" +"After computing the appropriate bucket, the entries in that bucket are " +"searched to find a match. In this case a match is detected by comparison of " +"the bottom 12 bits of the cache ID (the *cache tag*). The search starts at " +"the base entry for the bucket and then proceeds via the linked list of " +"entries from that first entry. If a tag match is found and there is no :arg:" +"`collision` then that entry is returned and :arg:`last_collision` is " +"updated to that entry. If :arg:`collision` is set, then if it isn't the " +"current match the search continues down the linked list, otherwise :arg:" +"`collision` is cleared and the search continues. The effect of this is that " +"matches are skipped until the last returned match (:arg:`last_collision`) " +"is found, after which the next match (if any) is returned. 
If the search " +"falls off the end of the linked list then a miss result is returned (if no " +"last collision), otherwise the probe is restarted after clearing the " +"collision on the presumption that the entry for the collision has been " +"removed from the bucket. This can lead to repeats among the returned values " +"but guarantees that no valid entry will be skipped." +msgstr "" +"適切なバケットを算出した後、バケットのエントリはマッチするものを探すために" +"検索されます。この場合、マッチするかがキャッシュキーの下位 12 ビット" +"( *キャッシュタグ* )の比較によって検出されます。検索はバケットのベース" +"エントリから開始して、その後先頭のエントリからのエントリ連結リストから" +"進めます。タグのマッチが発見されていてかつ :arg:`collision` が無い場合、" +"エントリは返されてそのエントリへの :arg:`last_collision` が更新されます。 " +":arg:`collision` がセットされていて現在マッチしていない場合、検索は連結" +"リストの次へ続きます。あるいは :arg:`collision` をクリアし、検索を継続します。" +"これにより最後に返されたマッチしたもの (:arg:`last_collision`) が見つかる" +"までマッチングがスキップされ、その後に次のマッチするものが(もしあるなら)" +"返されます。検索が連結リストの最後まで到達した場合、失敗した結果が返される" +"(もし最後の衝突がないなら)、あるいは衝突したエントリがバケットから削除されて" +"いるとの想定の上で、衝突を解消した後に検索が再開されます。この処理は値が返され" +"るまで繰り返すことができますが、正常なエントリがスキップされることがないことを" +"保証します。" + +#: ../../arch/cache/cache-arch.en.rst:498 +msgid "" +"Last collision can therefore be used to restart a probe at a later time. " +"This is important because the match returned may not be the actual object - " +"although the hashing of the cache ID to a bucket and the tag matching is " +"unlikely to create false positives, that is possible. When a fragment is " +"read the full cache ID is available and checked and if wrong, that read can " +"be discarded and the next possible match from the directory found because " +"the cache virtual connection tracks the last collision value." 
+msgstr "" +"最後の衝突は従って後で検索を再開するのに使用できます。返されたマッチしたものが" +"実際のオブジェクトではないかも知れないため、これは重要です。バケットへの" +"キャッシュ ID のハッシュ化とタグマッチングが偽陽性を発生しそうに無くても、" +"それは使用可能です。フラグメントが読み込まれる際、キャッシュ ID の全てが使用" +"可能でありチェックされた結果誤りがある場合、その読込みは破棄が可能であり" +"キャッシュ仮想接続は最後の衝突値を追跡するためディレクトリから次のマッチする" +"可能性があるものが検索されます。" + +#: ../../arch/cache/cache-arch.en.rst:506 +msgid "Cache Operations" +msgstr "キャッシュ操作" + +#: ../../arch/cache/cache-arch.en.rst:508 +msgid "" +"Cache activity starts after the HTTP request header has been parsed and " +"remapped. Tunneled transactions do not interact with the cache because the " +"headers are never parsed." +msgstr "" +"キャッシュアクティビティは HTTP リクエストヘッダがパースされ、リマップされて" +"から開始します。トンネルされたトランザクションはヘッダがパースされることがない" +"ためキャッシュに影響しません。" + +#: ../../arch/cache/cache-arch.en.rst:510 +msgid "" +"To understand the logic we must introduce the term \"cache valid\" which " +"means something that is directly related to an object that is valid to be " +"put in the cache (e.g. a ``DELETE`` which refers to a URL that is cache " +"valid but cannot be cached itself). This is important because |TS| computes " +"cache validity several times during a transaction and only performs cache " +"operations for cache valid results. The criteria used changes during the " +"course of the transaction as well. This is done to avoid the cost of cache " +"activity for objects that cannot be in the cache." +msgstr "" +"ロジックを理解するため、\"キャッシュバリッド\" という用語を紹介しなければ" +"なりません。これはキャッシュされるのに有効なオブジェクトに直接関係するもの" +"を意味します。(例えばキャッシュバリッドな URL を参照する ``DELETE`` は" +"それ自身はキャッシュできません) |TS| はトランザクション中に何度かキャッシュの" +"正当性を計算してキャッシュバリッドの結果によってのみキャッシュ操作を行うため、" +"この用語は重要です。正当性の基準は同様にトランザクション中の変更を使用しま" +"した。この処理はキャッシュできないオブジェクトの為のキャッシュアクティビティに" +"よるコストを避けるために実行されます。" + +#: ../../arch/cache/cache-arch.en.rst:512 +msgid "" +"The three basic cache operations are lookup, read, and write. We will take " +"deleting entries as a special case of writing where only the volume " +"directory is updated." 
+msgstr "" +"3 つの基本的なキャッシュ操作は検索、読込み、そして書込みです。エントリの" +"削除はボリュームディレクトリが更新される場合のみの書込みの特殊なケースとして" +"扱います。" + +#: ../../arch/cache/cache-arch.en.rst:514 +msgid "" +"After the client request header is parsed and is determined to be " +"potentially cacheable, a `cache lookup`_ is done. If successful a `cache " +"read`_ is attempted. If either the lookup or the read fails and the content " +"is considered cacheable then a `cache write`_ is attempted." +msgstr "" +"クライアントリクエストヘッダがパースされてキャッシュできると決定された後、 " +"`キャッシュ検索`_ が行われます。成功した場合 `キャッシュ読込み`_ が試みられ" +"ます。検索か読込みのいずれかが失敗してかつコンテンツがキャッシュできると判断" +"される場合、 `キャッシュ書込み`_ が試みられます。" + +#: ../../arch/cache/cache-arch.en.rst:517 +msgid "Cacheability" +msgstr "キャッシャビリティ" + +#: ../../arch/cache/cache-arch.en.rst:519 +msgid "" +"The first thing done with a request with respect to cache is to determine " +"whether it is potentially a valid object for the cache. After initial " +"parsing and remapping this check is done primarily to detect a negative " +"result because if so all further cache processing is skipped -- it will not " +"be put in to the cache nor will a cache lookup be done. There are a number " +"of prerequisites along with configuration options to change them. " +"Additional cacheability checks are done later in the process when more is " +"known about the transaction (such as plugin operations and the origin " +"server response). Those checks are described as appropriate in the sections " +"on the relevant operations." 
+msgstr "" +"キャッシュに関するリクエストで最初に行われるのは、潜在的にキャッシュするのに" +"有効なオブジェクトであるかどうかを決定することです。最初のパースとリマップの" +"後、このチェックは主にネガティブな結果を検出するために行われます。これはもし" +"ネガティブな結果が出た場合は全ての後のキャッシュ処理がスキップされる、すなわち" +"キャッシュに配置されずキャッシュ検索も実行されなくなるためです。それらを変更" +"する設定オプションに加え、必要条件が数多く存在します。追加のキャッシャビリティ" +"チェックは、トランザクションについてより多くのことが分かった際の処理" +"(プラグイン操作やオリジンサーバレスポンスのような)以降に実行されます。" +"それらのチェックについては関連する操作のセクションで適切に説明します。" + +#: ../../arch/cache/cache-arch.en.rst:521 +msgid "The set of things which can affect cacheability are" +msgstr "キャッシャビリティに影響できる項目のセットは、" + +#: ../../arch/cache/cache-arch.en.rst:523 +msgid "Built in constraints" +msgstr "組込みの制約" + +#: ../../arch/cache/cache-arch.en.rst:524 +msgid "Settings in :file:`records.config`" +msgstr ":file:`records.config` の設定" + +#: ../../arch/cache/cache-arch.en.rst:525 +msgid "Settings in :file:`cache.config`" +msgstr ":file:`cache.config` の設定" + +#: ../../arch/cache/cache-arch.en.rst:526 +msgid "Plugin operations" +msgstr "プラグインの操作" + +#: ../../arch/cache/cache-arch.en.rst:528 +msgid "" +"The initial internal checks, along with their :file:`records.config` " +"overrides[#]_, are done in::" +msgstr "" +"初期の内部チェック、それらの :file:`records.config` を伴ったオーバー" +"ライドは以下の関数内で処理されます。 ::" + +#: ../../arch/cache/cache-arch.en.rst:532 +msgid "The checks that are done are" +msgstr "チェックは以下のように行われます。" + +#: ../../arch/cache/cache-arch.en.rst:537 +msgid "Cacheable Method" +msgstr "キャッシュ可能なメソッド" + +#: ../../arch/cache/cache-arch.en.rst:535 +msgid "" +"The request must be one of ``GET``, ``HEAD``, ``POST``, ``DELETE``, ``PUT``." +msgstr "" +"リクエストは ``GET``, ``HEAD``, ``POST``, ``DELETE``, ``PUT`` のいずれかで" +"なければなりません。" + +#: ../../arch/cache/cache-arch.en.rst:537 +msgid "See ``HttpTransact::is_method_cache_lookupable()``." 
+msgstr "``HttpTransact::is_method_cache_lookupable()`` を確認してください。" + +#: ../../arch/cache/cache-arch.en.rst:551 +msgid "Dynamic URL" +msgstr "動的 URL" + +#: ../../arch/cache/cache-arch.en.rst:540 +msgid "" +"|TS| tries to avoid caching dynamic content because it's dynamic. A URL is " +"considered dynamic if it" +msgstr "" +"URL が動的であるため |TS| は動的コンテンツのキャッシングを避けようとします。" +"もし以下の条件に当てはまるなら URL は動的であると考えられます。" + +#: ../../arch/cache/cache-arch.en.rst:542 +msgid "is not ``HTTP`` or ``HTTPS``" +msgstr "``HTTP`` や ``HTTPS`` ではない" + +#: ../../arch/cache/cache-arch.en.rst:543 +msgid "has query parameters" +msgstr "クエリパラメータを含む" + +#: ../../arch/cache/cache-arch.en.rst:544 +msgid "ends in ``asp``" +msgstr "``asp`` で終わる" + +#: ../../arch/cache/cache-arch.en.rst:545 +msgid "has ``cgi`` in the path" +msgstr "パスに ``cgi`` を含む" + +#: ../../arch/cache/cache-arch.en.rst:547 +msgid "This check can be disabled by setting a non-zero value for::" +msgstr "このチェックは以下にゼロでない値を設定することで無効にできます::" + +#: ../../arch/cache/cache-arch.en.rst:551 +msgid "" +"In addition if a TTL is set for rule that matches in :file:`cache.config` " +"then this check is not done." +msgstr "" +"加えて、 :file:`cache.config` にマッチするルールに TTL が設定されていたら" +"このチェックは行われません。" + +#: ../../arch/cache/cache-arch.en.rst:554 +msgid "Range Request" +msgstr "レンジリクエスト" + +#: ../../arch/cache/cache-arch.en.rst:554 +msgid "" +"Cache valid only if :ts:cv:`proxy.config.http.cache.range.lookup` in :file:" +"`records.config` is non-zero. This does not mean the range request can be " +"cached, only that it might be satisfiable from the cache." +msgstr "" +":file:`records.config` の :ts:cv:`proxy.config.http.cache.range.lookup` が" +"ゼロでない値である場合に限りキャッシュバリッドです。これはレンジリクエストが" +"キャッシュ可能であることを意味しません。キャッシュから満たされるかもしれないと" +"いうだけです。" + +#: ../../arch/cache/cache-arch.en.rst:556 +msgid "" +"A plugin can call :c:func:`TSHttpTxnReqCacheableSet()` to force the request " +"to be viewed as cache valid." 
+msgstr "" +"プラグインはリクエストがキャッシュバリッドと見なされる事を強制するため " +":c:func:`TSHttpTxnReqCacheableSet()` を呼び出せます。" + +#: ../../arch/cache/cache-arch.en.rst:558 +msgid "" +"The code appears to check :file:`cache.config` in this logic by setting the " +"``does_config_permit_lookup`` in the ``cache_info.directives`` of the state " +"machine instance but I can find no place where the value is used. The " +"directive ``does_config_permit_storing`` is set and later checked so the " +"directive (from the administrator point of view) is effective in preventing " +"caching of the object." +msgstr "" +"コード上ではこのロジックにおいてステートマシンインスタンスの " +"``cache_info.directives`` の中の ``does_config_permit_lookup`` の値を設定する" +"ことにより :file:`cache.config` をチェックするように見えますが、その値が使用" +"されている箇所は発見できません。 ``does_config_permit_storing`` ディレクティブ" +"が設定されてその後にチェックされるので、このディレクティブは(管理者の観点" +"から)オブジェクトのキャッシュすることを妨げるのに効果的です。" + +#: ../../arch/cache/cache-arch.en.rst:561 +msgid "Cache Lookup" +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:563 +msgid "" +"If the initial request is not determined to be cache invalid then a lookup " +"is done. Cache lookup determines if an object is in the cache and if so, " +"where it is located. In some cases the lookup proceeds to read the first " +"``Doc`` from disk to verify the object is still present in the cache." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:565 +msgid "There are three basic steps to a cache lookup." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:567 +msgid "The cache key is computed." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:569 +msgid "" +"This is normally computed using the request URL but it can be overridden :" +"ref:`by a plugin ` . As far as I can tell the cache index string " +"is not stored anywhere, it presumed computable from the client request " +"header." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:571 +msgid "The cache stripe is determined (based on the cache key)." 
+msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:573 +msgid "" +"The cache key is used as a hash key in to an array of :cpp:class:`Vol` " +"instances. The construction and arrangement of this array is the essence of " +"how volumes are assigned." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:577 +msgid "" +"Various other lookaside directories are checked as well, such as the :ref:" +"`aggregation buffer `." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:579 +msgid "" +"If the directory entry is found the first ``Doc`` is read from disk and " +"checked for validity." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:581 +msgid "" +"This is done in :cpp:func:`CacheVC::openReadStartHead()` or :cpp:func:" +"`CacheVC::openReadStartEarliest()` which are tightly coupled methods." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:583 +msgid "" +"If the lookup succeeds then a more detailed directory entry (struct :cpp:" +"class:`OpenDir`) is created. Note that the directory probe includes a check " +"for an already extant ``OpenDir`` which if found is returned without " +"additional work." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:586 +msgid "Cache Read" +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:588 +msgid "" +"Cache read starts after a successful `cache lookup`_. At this point the " +"first ``Doc`` has been loaded in to memory and can be consulted for " +"additional information. This will always contain the HTTP headers for all " +"alternates of the object." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:0 +msgid "Read while write" +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:592 +msgid "" +"There is provision in the code to support \"read while write\", that is " +"serving an object from cache in one transaction while it is being written " +"in another. It is unclear to me if this actually works. 
It must " +"specifically enabled in :file:`records.config` and if not, a cache read " +"will fail if the object is currently be written or updated." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:594 +msgid "" +"At this point an alternate for the object is selected. This is done by " +"comparing the client request to the stored response headers, but it can be " +"controlled by a plugin using ``TS_HTTP_ALT_SELECT_HOOK``." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:596 +msgid "" +"The content can now be checked to see if it is stale by calculating the " +"\"freshness\" of the object. This is essential checking how old the object " +"is by looking at the headers and possibly other metadata (note the headers " +"can't be checked until we've selected an alternate)." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:598 +msgid "Most of this work is done in::" +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:602 +msgid "" +"First the TTL (time to live) value which can be set in:file:`cache.config` " +"is checked if the request matches the configuration file line. This is done " +"based on when the object was placed in cache, not on any data in the " +"headers." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:604 +msgid "" +"Next an internal flag (\"needs-revalidate-once\") is checked if the :file:" +"`cache.config` value \"revalidate-after\" is not set, and if set the object " +"is marked \"stale\"." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:606 +msgid "After these checks the object age is calculated by::" +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:610 +msgid "" +"and then any configured fuzzing is applied. 
The limits to this age based on " +"available data is calculated by::" +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:614 +msgid "" +"How this age is used is determined by the :file:`records.config` value::" +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:618 +msgid "" +"If this is zero then the built caclulations are used which compare the " +"freshness limits with document age, modified by any of the client supplied " +"cache control values ``max-age``, ``min-fresh``, ``max-stale`` unless " +"explicitly overridden in :file:`cache.config`." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:620 +msgid "" +"If the object is not stale then it is served to the client. If stale the " +"client request may be changed to an ``If Modified Since`` request to " +"revalidate." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:622 +msgid "" +"The request is served using a standard virtual connection tunnel " +"(``HttpTunnel``) with the :cpp:class:`CacheVC` acting as the producer and " +"the client ``NetVC`` acting as the sink. If the request is a range request " +"this can be modified with a transform to select the appropriate parts of " +"the object or, if the request contains a single range, it can use the range " +"acceleration." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:627 +msgid "" +"Range acceleration is done by consulting a fragment offset table attached " +"to the earliest ``Doc`` which contains offsets for all fragments past the " +"first. This allows loading the fragment containing the first requested byte " +"immediately rather than performing reads on the intermediate fragments." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:632 +msgid "Cache Write" +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:634 +msgid "" +"Writing to cache is handled by an instance of the class :cpp:class:" +"`CacheVC`. This is a virtual connection which receives data and writes it " +"to cache, acting as a sink. 
For a standard transaction data transfers " +"between virtual connections (*VConns*) are handled by :cpp:class:" +"HttpTunnel. Writing to cache is done by attaching a ``CacheVC`` instance as " +"a tunnel consumer. It therefore operates in parallel with the virtual " +"connection that transfers data to the client. The data does not flow to the " +"cache and then to the client, it is split and goes both directions in " +"parallel. This avoids any data synchronization issues between the two." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:0 +msgid "Writing to disk" +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:643 +msgid "" +"The actual write to disk is handled in a separate thread dedicated to I/O " +"operations, the AIO threads. The cache logic marshals the data and then " +"hands the operation off to the AIO thread which signals back once the " +"operation completes." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:647 +msgid "" +"While each ``CacheVC`` handles its transactions independently, they do " +"interact at the volume level as each ``CacheVC`` makes calls to the volume " +"object to write its data to the volume content. The ``CacheVC`` accumulates " +"data internally until either the transaction is complete or the amount of " +"data to write exceeds the target fragment size. In the former case the " +"entire object is submitted to the volume to be written. In the latter case " +"a target fragment size amount of data is submitted and the ``CacheVC`` " +"continues to operate on subsequent data. The volume in turn places these " +"write requests in an holding area called the `aggregation buffer`_." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:654 +msgid "" +"For objects under the target fragment size there is no consideration of " +"order, the object is simply written to the volume content. For larger " +"objects the earliest ``Doc`` is written first and the first ``Doc`` written " +"last. 
This provides some detection ability should the object be " +"overwritten. Because of the nature of the write cursor no fragment after " +"the first fragment (in the earliest ``Doc``) can be overwritten without " +"also overwriting that first fragment (since we know at the time the object " +"was finalized in the cache the write cursor was at the position of the " +"first ``Doc``)." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:661 +msgid "" +"It is the responsibility of the ``CacheVC`` to not submit writes that " +"exceed the target fragment size." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:666 +msgid "Update" +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:668 +msgid "" +"Cache write also covers the case where an existing object in the cache is " +"modified. This occurs when" +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:670 +msgid "" +"A conditional request is made to the origin server and a ``304 - Not " +"Modified`` response is received." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:671 +msgid "" +"An alternate of the object is retrieved from an origin server and added to " +"the object." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:672 +msgid "" +"An alternate of the object is removed (e.g., due to a ``DELETE`` request)." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:674 +msgid "" +"In every case the metadata for the object must be modified. Because |TS| " +"never updates data already in the cache this means the first ``Doc`` will " +"be written to the cache again and the volume directory entry updated. " +"Because a client request has already been processed the first ``Doc`` has " +"been read from cache and is in memory. The alternate vector is updated as " +"appropriate (an entry added or removed, or changed to contain the new HTTP " +"headers), and then written to disk. It is possible for multiple alternates " +"to be updated by different ``CacheVC`` instances at the same time. 
The only " +"contention is the first ``Doc``, the rest of the data for each alternate is " +"completely independent." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:684 +msgid "Aggregation Buffer" +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:686 +msgid "" +"Disk writes to cache are handled through an *aggregation buffer*. There is " +"one for each :cpp:class:`Vol` instance. To minimize the number of system " +"calls data is written to disk in units of roughly :ref:`target fragment " +"size ` bytes. The algorithm used is simple - data is " +"piled up in the aggregation buffer until no more will fit without going " +"over the targer fragment size, at which point the buffer is written to disk " +"and the volume directory entries for objects with data in the buffer are " +"updated with the actual disk locations for those objects (which are " +"determined by the write to disk action). After the buffer is written it is " +"cleared and process repeats. There is a special lookup table for the " +"aggregation buffer so that object lookup can find cache data in that memory." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:694 +msgid "" +"Because data in the aggregation buffer is visible to other parts of the " +"cache, particularly `cache lookup`_, there is no need to push a partial " +"filled aggregation buffer to disk. In effect any such data is effectively " +"memory cached until enough additional cache content arrives to fill the " +"buffer." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:698 +msgid "" +"The target fragment size has little effect on small objects because the " +"fragment sized is used only to parcel out disk write operations. For larger " +"objects the effect very significant as it causes those objects to be broken " +"up in to fragments at different locations on in the volume. Each fragment " +"write has its own entry in the volume directory which are computational " +"chained (each cache key is computed from the previous one). 
If possible a " +"fragment table is accumulated in the earliest ``Doc`` which has the offsets " +"of the first byte for each fragment." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:705 +msgid "Evacuation Mechanics" +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:707 +msgid "" +"By default the write cursor will overwrite (de facto evict from cache) " +"objects as it proceeds once it has gone around the cache stripe at least " +"once. In some cases this is not acceptable and the object is *evacuated* by " +"reading it from the cache and then writing it back to cache which moves the " +"physical storage of the object from in front of the write cursor to behind " +"the write cursor. Objects that are evacuated are handled in this way based " +"on data in stripe data structures (attached to the :cpp:class:`Vol` " +"instance)." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:713 +msgid "" +"Evacuation data structures are defined by dividing up the volume content in " +"to a disjoint and contiguous set of regions of ``EVACUATION_BUCKET_SIZE`` " +"bytes. The :cpp:member:`Vol::evacuate` member is an array with an element " +"for each evacuation region. Each element is a doubly linked list of :cpp:" +"class:`EvacuationBlock` instances. Each instance contains a :cpp:class:" +"`Dir` that specifies the fragment to evacuate. It is assumed that an " +"evacuation block is placed in the evacuation bucket (array element) that " +"corresponds to the evacuation region in which the fragment is located " +"although no ordering per bucket is enforced in the linked list (this " +"sorting is handled during evacuation). Objects are evacuated by specifying " +"the first or earliest fragment in the evactuation block. The evactuation " +"operation will then continue the evacuation for subsequent fragments in the " +"object by adding those fragments in evacuation blocks. 
Note that the actual " +"evacuation of those fragments is delayed until the write cursor reaches the " +"fragments, it is not ncessarily done at the time the first / earliest " +"fragment is evacuated." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:724 +msgid "" +"There are two types of evacuations, reader based and forced. The " +"``EvacuationBlock`` has a reader count to track this. If the reader count " +"is zero, then it is a forced evacuation and the the target, if it exists, " +"will be evacuated when the write cursor gets close. If the reader value is " +"non-zero then it is a count of entities that are currently expecting to be " +"able to read the object. Readers increment the count when they require read " +"access to the object, or create the ``EvacuationBlock`` with a count of 1. " +"When a reader is finished with the object it decrements the count and " +"removes the ``EvacuationBlock`` if the count goes to zero. If the " +"``EvacuationBlock`` already exists with a count of zero, the count is not " +"modified and the number of readers is not tracked, so the evacuation is " +"valid as long as the object exists." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:732 +msgid "" +"Evacuation is driven by cache writes, essentially in :cpp:member:`Vol::" +"aggWrite`. This method processes the pending cache virtual connections that " +"are trying to write to the stripe. Some of these may be evacuation virtual " +"connections. If so then the completion callback for that virtual connection " +"is called as the data is put in to the aggregation buffer." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:737 +msgid "" +"When no more cache virtual connections can be processed (due to an empty " +"queue or the aggregation buffer filling) then :cpp:member:`Vol::evac_range` " +"is called to clear the range to be overwritten plus an additional :ts:const:" +"`EVACUATION_SIZE` range. The buckets covering that range are checked. 
If " +"there are any items in the buckets a new cache virtual connection (a \"doc " +"evacuator\") is created and used to read the evacuation item closest to the " +"write cursor (i.e. with the smallest offset in the stripe) instead of the " +"aggregation write proceeding. When the read completes it is checked for " +"validity and if valid, the cache virtual connection for it is placed at the " +"front of the write queue for the stripe and the write aggregation resumed." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:745 +msgid "" +"Before doing a write, the method :cpp:func:`Vol::evac_range()` is called to " +"start an evacuation. If any fragments are found in the buckets in the range " +"the earliest such fragment (smallest offset, closest to the write cursor) " +"is selected and read from disk and the aggregation buffer write is " +"suspended. The read is done via a cache virtual connection which also " +"effectively serves as the read buffer. Once the read is complete, that " +"cache virtual connection instance (the \"doc evacuator\") is place at the " +"front of the stripe write queue and written out in turn. Because the " +"fragment data is now in memory it is acceptable to overwrite the disk image." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:752 +msgid "" +"Note that when normal stripe writing is resumed, this same check is done " +"again, each time evauating (if needed) a fragment and queuing them for " +"writing in turn." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:755 +msgid "" +"Updates to the directory are done when the write for the evacuated fragment " +"completes. Multi-fragment objects are detected after the read completes for " +"a fragment. If it is not the first fragment then the next fragment is " +"marked for evacuation (which in turn, when it is read, will pull the " +"subsequent fragment). 
The logic doesn't seem to check the length and " +"presumes that the end of the alternate is when the next key is not in the " +"directory." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:760 +msgid "" +"This interacts with the \"one at a time\" strategy of the aggregation write " +"logic. If a fragment is close to the fragment being evacuated it may end up " +"in the same evacuation bucket. Because the aggregation write checks every " +"time for the \"next\" fragment to evacuate it will find that next fragment " +"and evacuate it before it is overwritten." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:777 +msgid "Evacuation Operation" +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:779 +msgid "" +"The primary source of fragments to be evacuated are active fragments. That " +"is fragments which are currently open, to be read or written. This is " +"tracked by the reader value in the evacuation blocks noted above." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:781 +msgid "" +"If object pinning is enabled then a scan is done on a regular basis as the " +"write cursor moves to detected pinned objects and mark them for evacuation." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:783 +msgid "" +"Fragments can also be evacuated through *hit evacuation*. This is " +"configured by :ts:cv:`proxy.config.cache.hit_evacuate_percent` and :ts:cv:" +"`proxy.config.cache.hit_evacuate_size_limit`. When a fragment is read it is " +"checked to see if it is close and in front of the write cursor, close being " +"less than the specified percent of the size of the stripe. If set at the " +"default value of 10, then if the fragment is withing 10% of the size of the " +"stripe it is marked for evacuation. This is cleared if the write cursor " +"passes through the fragment while it remains open (as all open objects are " +"evacuated). If when the object is closed the fragment is still marked then " +"it is placed in the appropriate evacuation bucket." 
+msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:786 +msgid "Initialization" +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:788 +msgid "" +"Initialization starts with an instance of :cpp:class:`Store` reading the " +"storage configuration file, by default :file:`storage.config`. For each " +"valid element in the file an instance of :cpp:class:`Span` is created. " +"These are of basically four types," +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:792 +msgid "File" +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:793 +msgid "Directory" +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:794 +msgid "Disk" +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:795 +msgid "Raw device" +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:804 +msgid "" +"After configuration initialization the cache processor is started by " +"calling :ccp:func:`CacheProcessor::start()`. This does a number of things." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:838 +msgid "Footnotes" +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:839 +msgid "" +"`Work is under way `_ on " +"extending this to include objects that are in the ram cache." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:842 +msgid "" +"This linked list is mostly ignored in later processing, causing all but one " +"file or directory storage units on the same device to be ignored. See " +"`TS-1869 `_." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:845 +msgid "" +"It is unclear to me how that can happen, as the offsets are computed later " +"and should all be zero at the time the spans are coalesced, and as far as I " +"can tell the sort / coalesce is only done during initialization." 
+msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:102 +msgid "cache directory" +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:108 +msgid "directory entry" +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:109 +msgid "fragment" +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:110 +msgid "cache ID" +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:267 +msgid "alternate" +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:462 +msgid "DIR_DEPTH" +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:462 +msgid "index segment" +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:462 +msgid "index buckets" +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:26 +msgid "" +"In addition to an HTTP proxy, |ATS| is also an HTTP cache. |TS| can cache " +"any octet stream although it currently supports only those octet streams " +"delivered by the HTTP protocol. When such a stream is cached (along with " +"the HTTP protocol headers) it is termed an :term:`object ` in " +"the cache. Each object is identified by a globally unique value called a :" +"term:`cache key`." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:43 +msgid "" +"The following sections describe how persistent cache data is structured. |" +"TS| treats its persisent storage an undifferentiated collection of bytes, " +"assuming no other structure to it. In particular it does not use the file " +"system of the host operating system. If a file is used it is used only to " +"mark out the set of bytes to be used." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:50 +msgid "" +"The raw storage for the |TS| cache is configured in :file:`storage.config`. " +"Each line in the file defines a :term:`cache span` which is treated as a " +"uniform persistent store." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:58 +msgid "" +"This storage organized in to a set of :term:`cache volume`\\ s which are " +"defined in :file:`volume.config` for the purposes of the administrator. 
" +"These are the units that used for all other administator level " +"configuration." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:61 +msgid "" +"Cache volumes can be defined by a percentage of the total storage or an " +"absolute amount of storage. By default each cache volume is spread across " +"all of the cache spans for robustness. The intersection of a cache volume " +"and a cache span is a :term:`cache stripe`. Each cache span is divided in " +"to cache stripes and each cache volume is a collection of those stripes." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:76 +msgid "" +"Cache stripes are the fundamental unit of cache for the implementation. A " +"cached object is stored entirely in a single stripe, and therefore in a " +"single cache span - objects are never split across cache spans or volumes. " +"Objects are assigned to a stripe (and hence to a cache volume) " +"automatically based on a hash of the URI used to retrieve the object from " +"the origin server. It is possible to configure this to a limited extent in :" +"file:`hosting.config` which supports content from specific host or domain " +"to be stored on specific cache volumes. In addition, as of version 4.0.1 it " +"is possible to control which cache spans (and hence, which cache stripes) " +"are contained in a specific cache volume." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:114 +msgid "" +"Content in a stripe is tracked via a directory. We call each element of the " +"directory a \"directory entry\" and each is represented by :cpp:class:" +"`Dir`. Each entry refers to a chunk of contiguous storage in the cache. " +"These are referred to variously as \"fragments\", \"segments\", \"docs\" / " +"\"documents\", and a few other things. This document will use the term " +"\"fragment\" as that is the most common reference in the code. The term " +"\"Doc\" (for :cpp:class:`Doc`) will be used to refer to the header data for " +"a fragment. 
Overall the directory is treated as a hash with the :term:" +"`cache ID` as the key. See :ref:`directory probing ` " +"for how the cache ID is used to locate a directory entry. The cache ID is " +"in turn computed from a :term:`cache key` which by default is the URL of " +"the content." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:135 +msgid "" +"Each entry stores an offset in the stripe and a size. The size stored in " +"the directory entry is an :ref:`approximate size ` which is at " +"least as big as the actual data in the fragment. Exact size data is stored " +"in the fragment header on disk." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:141 +msgid "" +"Data in HTTP headers cannot be examined without disk I/O. This includes the " +"original URL for the object. The cache key is not stored explicitly and " +"therefore cannot be reliably retrieved." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:144 +msgid "" +"The directory is a hash table that uses `chaining `_ for collision " +"resolution. Because each entry is small they are used directly as the list " +"header of the hash bucket." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:151 +msgid "" +"Chaining is implemented by imposing grouping structures on the entries in a " +"directory. The first level grouping is a :term:`directory bucket`. This is " +"a fixed number (currently 4 - defined as ``DIR_DEPTH``) of entries. This " +"serves to define the basic hash buckets with the first entry in each cache " +"bucket serving as the root of the hash bucket." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:158 +msgid "" +"The term \"bucket\" is used in the code to mean both the conceptual bucket " +"for hashing and for a structural grouping mechanism in the directory and so " +"these will be qualified as needed to distinguish them. The unqualified term " +"\"bucket\" is almost always used to mean the structural grouping in the " +"directory." 
+msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:162 +msgid "" +"Directory buckets are grouped in to :term:`segments `. " +"All segments in a stripe have the same number of buckets. The number of " +"segments in a stripe is chosen so that each segment has as many buckets as " +"possible without exceeding 65535 (2\\ :sup:`16`\\ -1) entries in a segment." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:169 +msgid "" +"Each directory entry has a previous and next index value which is used to " +"link entries in the same segment. Because no segment has more than 65535 " +"entries 16 bits suffices for storing the index values. The stripe header " +"contains an array of entry indices which are used as the roots of entry " +"free lists, one for each segment. Active entries are stored via the bucket " +"structure. When a stripe is initialized the first entry in each bucket is " +"zeroed (marked unused) and all other entries are put in the corresponding " +"segment free list in the stripe header. This means the first entry of each " +"directory bucket is used as the root of a hash bucket and is therefore " +"marked unused rather than being put a free list. The other entries in the " +"directory bucket are preferentially preferred for adding to the " +"corresponding hash bucket but this is not required. The segment free lists " +"are initialized such that the extra bucket entries are added in order - all " +"the seconds, then the thirds, then the fourths. Because the free lists are " +"FIFOs this means extra entries will be selected from the fourth entries " +"across all the buckets first, then the thirds, etc. When allocating a new " +"directory entry in a bucket the entries are searched from first to last, " +"which maximizes bucket locality (that is, cache IDs that map to the same " +"hash bucket will also tend to use the same directory bucket)." 
+msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:185 +msgid "" +"Entries are removed from the free list when used and returned when no " +"longer in use. When a fragment needs to be put in to the directory the " +"cache ID is used to locate a hash bucket (which also determines the segment " +"and directory bucket). If the first entry in the directory bucket is marked " +"unused, it is used. If not then the other entries in the bucket are " +"searched and if any are on the free list, that entry is used. If none are " +"available then the first entry on the segment free list is used. This entry " +"is attached to the hash bucket via the same next and previous indices used " +"for the free list so that it can be found when doing a lookup of a cache ID." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:575 +msgid "" +"The cache stripe directory :ref:`is probed ` using " +"the index key computed from the cache key." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:797 +msgid "" +"After creating all the `Span` instances they are grouped by device id to " +"internal linked lists attached to the :cpp:member:`Store::disk` array [#]_. " +"Spans that refer to the same directory, disk, or raw device are coalesced " +"in to a single span. Spans that refer to the same file with overlapping " +"offsets are also coalesced [#]_. This is all done in :c:func:" +"`ink_cache_init()` called during startup." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:802 +msgid "" +"The span logic is also used by the HostDB and more than one otherwise " +"inexplicable feature is provided by the span logic for that module." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:807 +msgid "" +"For each valid span, an instance of :cpp:class:`CacheDisk` is created. This " +"class is a continuation and so can be used to perform potentially blocking " +"operations on the span. The primary use of these is to be passed to the AIO " +"threads as the callback when an I/O operation completes. 
These are then " +"dispatched to AIO threads to perform storage unit initialization. After all " +"of those have completed, the resulting storage is distributed across the " +"volumes in :c:func:`cplist_reconfigure`. The :cpp:class:`CacheVol` " +"instances are created at this time." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:813 +msgid "" +"Cache stripe assignment setup is done once all stripes have initialized " +"(that is, the stripe header information has been successfully read from " +"disk for all stripes). The assignment information is stored as an array of " +"indices. These are indices in to an array of stripes. Both the assignment " +"and the stripe arrays are stored in an instance of :cpp:class:" +"`CacheHostRecord`. Assignment initialization consists of populating the " +"assignment array which is much larger than the stripe array." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:819 +msgid "" +"There is an instance of :cpp:class:`CacheHostRecord` for each line in :file:" +"`hosting.config` and one \"generic\" record. For the configured instances " +"the set of stripes is determined from the cache volume specified in the " +"line. If no lines are specified all stripes are placed in the generic " +"record, otherwise only those stripes marked as default are placed in the " +"generic record." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:822 +msgid "" +"If hosting records are specified it is an error to not specify at least one " +"default cache volume." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:824 +msgid "" +"The assignment table is initialized in :c:func:`build_vol_hash_table` which " +"is called for each :cpp:class:`CacheHostRecord` instance. For each strip in " +"the host record a sequence of pseudo-random numbers is generated, starting " +"with the folded hash of the stripe hash identifier, which is the device " +"path followed by the skip and size values for that stripe, making it " +"unique. 
This also makes the sequence deterministic for any particular " +"stripe. Each stripe gets one number in its sequence for every " +"`VOL_HASH_ALLOC_SIZE` (8 MB currently) of storage. These numbers are paired " +"with the stripe index, combined across all stripes, then sorted by the " +"random values. The resulting array is sampled for every slot in the stripe " +"assignment table by dividing the maximum random value by the size of the " +"assignment table and using the value midway between each multiple of the " +"result of the division. The coalesced psuedo-random sequence is scanned for " +"each sample in turn and the first number not greater than the sample is " +"found. The stripe associated with that value is used for that assignment " +"table entry." +msgstr "" + +#: ../../arch/cache/cache-arch.en.rst:835 +msgid "" +"While this procedure is determinstic it is sensitive to initial conditions, " +"including the size of each stripe." +msgstr "" diff --git a/doc/locale/ja/LC_MESSAGES/arch/cache/cache-data-structures.en.po b/doc/locale/ja/LC_MESSAGES/arch/cache/cache-data-structures.en.po new file mode 100644 index 00000000..55a8669c --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/arch/cache/cache-data-structures.en.po @@ -0,0 +1,288 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-11-08 17:57+0900\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../arch/cache/cache-data-structures.en.rst:19 +msgid "Cache Data Structures" +msgstr "キャッシュデータ構造" + +#: ../../arch/cache/cache-data-structures.en.rst:25 +msgid "" +"An open directory entry. It contains all the information of a :cpp:class:" +"`Dir` plus additional information from the first :cpp:class:`Doc`." +msgstr "" +"オープンされたディレクトリエントリ。 :cpp:class:`Dir` と、それに加えて 先頭" +"の :cpp:class:`Doc` の全情報を含みます。" + +#: ../../arch/cache/cache-data-structures.en.rst:29 +msgid "A virtual connection class which accepts input for writing to cache." +msgstr "キャッシュへ書込む入力を受け付ける仮想接続クラス。" + +#: ../../arch/cache/cache-data-structures.en.rst:33 +msgid "Do the initial read for a cached object." +msgstr "キャッシュされたオブジェクトの初回の読込みを実行します。" + +#: ../../arch/cache/cache-data-structures.en.rst:37 +msgid "Do the initial read for an alternate of an object." +msgstr "オブジェクトの代替の初回の読込みを実行します。" + +#: ../../arch/cache/cache-data-structures.en.rst:41 +msgid "" +"Data transfer driver. This contains a set of *producers*. Each producer is " +"connected to one or more *consumers*. The tunnel handles events and buffers " +"so that data moves from producers to consumers. The data, as much as " +"possible, is kept in reference counted buffers so that copies are done only " +"when the data is modified or for sources (which acquire data from outside |" +"TS|) and sinks (which move data to outside |TS|)." 
+msgstr "" +"データ転送ドライバ。これは *プロデューサ* のセットを含みます。各プロデューサ" +"は一つ以上の *コンシューマ* に接続されます。トンネルは、プロデューサからコン" +"シューマへデータを移動させる為に、イベントとバッファを扱います。データは、" +"データが修正される時のみ、またはソース(|TS| の外からデータを取得するもの)と" +"シンク(|TS| の外へデータを移動するもの)のためにコピーが実行されるため、可能" +"な限り参照カウントバッファに維持されます。" + +#: ../../arch/cache/cache-data-structures.en.rst:45 +msgid "Holds the data from a line in :file:`cache.config`." +msgstr ":file:`cache.config` のある行からデータを保持します。" + +#: ../../arch/cache/cache-data-structures.en.rst:49 +msgid "" +"Defined in |P-CacheHttp.h|_. This is an array of :cpp:class:`HTTPInfo` " +"objects and serves as the respository of information about alternates of an " +"object. It is marshaled as part of the metadata for an object in the cache." +msgstr "" +"|P-CacheHttp.h|_ で定義されます。これは :cpp:class:`HTTPInfo` オブジェクトの" +"配列であり、オブジェクトの代替に関する情報のリポジトリとして提供します。それ" +"はキャッシュ内のオブジェクトのメタデータ部分に配置されます。" + +#: ../../arch/cache/cache-data-structures.en.rst:53 +#: ../../arch/cache/cache-data-structures.en.rst:63 +msgid "Defined in |HTTP.h|_." +msgstr "|HTTP.h|_ で定義されます。" + +#: ../../arch/cache/cache-data-structures.en.rst:55 +msgid "" +"This class is a wrapper for :cpp:class:`HTTPCacheAlt`. It provides the " +"external API for accessing data in the wrapped class. It contains only a " +"pointer (possibly ``NULL``) to an instance of the wrapped class." +msgstr "" +"このクラスは :cpp:class:`HTTPCacheAlt` のラッパーです。ラップされたクラス内" +"のデータへのアクセスのための外部 API を提供します。ラップされたクラスのイン" +"スタンスへのポインター(``NULL`` の可能性がある)のみ保持します。" + +#: ../../arch/cache/cache-data-structures.en.rst:59 +msgid "A typedef for :cpp:class:`HTTPInfo`." +msgstr ":cpp:class:`HTTPInfo` の typedef 。" + +#: ../../arch/cache/cache-data-structures.en.rst:65 +msgid "" +"This is the metadata for a single alternate for a cached object. In " +"contains among other data" +msgstr "" +"キャッシュオブジェクトの単一の代替のメタデータです。下記の他データを含みま" +"す。" + +#: ../../arch/cache/cache-data-structures.en.rst:67 +msgid "The key for the earliest ``Doc`` of the alternate." 
+msgstr "代替の earliest ``Doc`` のキー。" + +#: ../../arch/cache/cache-data-structures.en.rst:68 +msgid "The request and response headers." +msgstr "リクエストヘッダとレスポンスヘッダ。" + +#: ../../arch/cache/cache-data-structures.en.rst:69 +msgid "The fragment offset table. [#]_" +msgstr "フラグメントオフセットテーブル。 [#]_" + +#: ../../arch/cache/cache-data-structures.en.rst:70 +msgid "Timestamps for request and response from origin server." +msgstr "オリジンサーバからのリクエストとレスポンスのタイムスタンプ。" + +#: ../../arch/cache/cache-data-structures.en.rst:74 +msgid "Record for evacuation." +msgstr "退避のためのレコード。" + +#: ../../arch/cache/cache-data-structures.en.rst:78 +msgid "This represents a storage unit inside a cache volume." +msgstr "キャッシュボリューム内においてストレージユニットを表します。" + +#: ../../arch/cache/cache-data-structures.en.rst:82 +msgid "" +"The number of segments in the volume. This will be roughly the total number " +"of entries divided by the number of entries in a segment. It will be " +"rounded up to cover all entries." +msgstr "" +"ボリュームのセグメント数。セグメント内のエントリ数で分割されたエントリのおお" +"よその合計数。全エントリをカバーするように丸め込まれます。" + +#: ../../arch/cache/cache-data-structures.en.rst:86 +msgid "" +"The number of buckets in the volume. This will be roughly the number of " +"entries in a segment divided by ``DIR_DEPTH``. For currently defined values " +"this is around 16,384 (2^16 / 4). Buckets are used as the targets of the " +"index hash." +msgstr "" +"ボリュームのバケット数。 ``DIR_DEPTH`` で分割されるセグメント内のおおよその" +"エントリ数。現在は約 16,384 (2^16 / 4) で定義されます。バケットはインデック" +"スハッシュの対象として使用されます。" + +#: ../../arch/cache/cache-data-structures.en.rst:90 +msgid "" +"Array of of :cpp:class:`EvacuationBlock` buckets. This is sized so there is " +"one bucket for every evacuation span." +msgstr "" +":cpp:class:`EvacuationBlock` バケットの配列。これは、退避スパンごとに一つの" +"バケットを持つようなサイズになります。" + +#: ../../arch/cache/cache-data-structures.en.rst:94 +msgid "Length of stripe in bytes." 
+msgstr "バイト単位のストライプ長。" + +#: ../../arch/cache/cache-data-structures.en.rst:98 +msgid "" +"Start an evacuation if there is any :cpp:class:`EvacuationBlock` in the " +"range from *low* to *high*. Return 0 if no evacuation was started, non-zero " +"otherwise." +msgstr "" +"*low* から *high* の範囲に任意の :cpp:class:`EvacuationBlock` がある場合、退" +"避を開始します。退避が開始されなかった場合は 0 を、それ以外は非 0 を返しま" +"す。" + +#: ../../arch/cache/cache-data-structures.en.rst:102 +msgid "A cache volume as described in :file:`volume.config`." +msgstr ":file:`volume.config` で記述されるキャッシュボリューム。" + +#: ../../arch/cache/cache-data-structures.en.rst:106 +msgid "Defined in |P-CacheVol.h|_." +msgstr "|P-CacheVol.h|_ で定義されます。" + +#: ../../arch/cache/cache-data-structures.en.rst:110 +msgid "Validity check value. Set to ``DOC_MAGIC`` for a valid document." +msgstr "" +"正当性チェック値。正当なドキュメントには ``DOC_MAGIC`` が設定されます。" + +#: ../../arch/cache/cache-data-structures.en.rst:114 +msgid "" +"The length of this segment including the header length, fragment table, and " +"this structure." +msgstr "" +"ヘッダ長、フラグメントテーブル、この構造体を含む、このセグメントの長さ。" + +#: ../../arch/cache/cache-data-structures.en.rst:118 +msgid "" +"Total length of the entire document not including meta data but including " +"headers." +msgstr "メタデータを含まない、ヘッダを含むドキュメント全体の長さ。" + +#: ../../arch/cache/cache-data-structures.en.rst:122 +msgid "" +"First index key in the document (the index key used to locate this object " +"in the volume index)." +msgstr "" +"ドキュメントの最初のインデックスキー。(インデックスキーはボリュームインデッ" +"クスにおいてこのオブジェクトを見つけるのに使用されます。)" + +#: ../../arch/cache/cache-data-structures.en.rst:126 +msgid "" +"The index key for this fragment. Fragment keys are computationally chained " +"so that the key for the next and previous fragments can be computed from " +"this key." +msgstr "" +"このフラグメントのインデックスキー。前後のフラグメントのキーがこのキーから計" +"算出来るように、フラグメントキーは計算的に繋がっています。" + +#: ../../arch/cache/cache-data-structures.en.rst:130 +msgid "" +"Document header (metadata) length. 
This is not the length of the HTTP " +"headers." +msgstr "ドキュメントヘッダ(メタデータ)長。 HTTP ヘッダの長さではありません。" + +#: ../../arch/cache/cache-data-structures.en.rst:134 +msgid "" +"Fragment type. Currently only `CACHE_FRAG_TYPE_HTTP` is used. Other types " +"may be used for cache extensions if those are ever used / implemented." +msgstr "" +"フラグメントタイプ。現在、 `CACHE_FRAG_TYPE_HTTP` のみ使用されます。キャッ" +"シュ拡張がいつか使用/実装されれば、他のタイプが使用されるかもしれません。" + +#: ../../arch/cache/cache-data-structures.en.rst:138 +msgid "" +"Fragment table length, if any. Only the first ``Doc`` in an object should " +"contain a fragment table." +msgstr "" +"もしあるなら、フラグメントテーブル長。オブジェクトの、先頭の ``Doc`` のみが" +"フラグメントテーブルを持つべきです。" + +#: ../../arch/cache/cache-data-structures.en.rst:140 +msgid "" +"The fragment table is a list of offsets relative to the HTTP content (not " +"counting metadata or HTTP headers). Each offset is the byte offset of the " +"first byte in the fragment. The first element in the table is the second " +"fragment (what would be index 1 for an array). The offset for the first " +"fragment is of course always zero and so not stored. The purpose of this is " +"to enable a fast seek for range requests - given the first ``Doc`` the " +"fragment containing the first byte in the range can be computed and loaded " +"directly without further disk access." +msgstr "" +"フラグメントテーブルは、 HTTP コンテンツ(メタデータや HTTP ヘッダは含みませ" +"ん)に関係したオフセットのリストです。各オフセットは、フラグメントの最初のバ" +"イトのバイトオフセットです。テーブルの最初の要素は、二番目のフラグメントで" +"す。(配列のインデックス 1 となるでしょう) 最初のフラグメントのオフセットはも" +"ちろん常にゼロであり、保存されません。この目的は、レンジリクエストの高速な検" +"索を可能にすることです。先頭の ``Doc`` が指定されると、範囲の最初のバイトを" +"含むフラグメントはそれ以上のディスクアクセス無しに、直接計算して読み込むこと" +"ができます。" + +#: ../../arch/cache/cache-data-structures.en.rst:142 +msgid "Removed as of version 3.3.0." +msgstr "バージョン 3.3.0 で削除されました。" + +#: ../../arch/cache/cache-data-structures.en.rst:146 +#: ../../arch/cache/cache-data-structures.en.rst:150 +msgid "Unknown." 
+msgstr "不明。" + +#: ../../arch/cache/cache-data-structures.en.rst:154 +msgid "Flag and timer for pinned objects." +msgstr "ピン留めされたオブジェクトのフラグとタイマー。" + +#: ../../arch/cache/cache-data-structures.en.rst:158 +msgid "Unknown. (A checksum of some sort)" +msgstr "不明。(何らかのチェックサム)" + +#: ../../arch/cache/cache-data-structures.en.rst:163 +msgid "Footnotes" +msgstr "脚注" + +#: ../../arch/cache/cache-data-structures.en.rst:164 +msgid "" +"Changed in version 3.2.0. This previously resided in the first ``Doc`` but " +"that caused different alternates to share the same fragment table." +msgstr "" +"バージョン 3.2.0 で変更されました。以前は先頭の ``Doc`` 内で持ちましたが、同" +"一フラグメントテーブルを共有するために異なる代替を引き起こしました。" diff --git a/doc/locale/ja/LC_MESSAGES/arch/cache/cache.en.po b/doc/locale/ja/LC_MESSAGES/arch/cache/cache.en.po new file mode 100644 index 00000000..982a8883 --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/arch/cache/cache.en.po @@ -0,0 +1,35 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-11-08 17:57+0900\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../arch/cache/cache.en.rst:19 +msgid "Apache Traffic Server Cache" +msgstr "" + +#: ../../arch/cache/cache.en.rst:21 +msgid "Contents:" +msgstr "" diff --git a/doc/locale/ja/LC_MESSAGES/arch/cache/ram-cache.en.po b/doc/locale/ja/LC_MESSAGES/arch/cache/ram-cache.en.po new file mode 100644 index 00000000..e66111b6 --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/arch/cache/ram-cache.en.po @@ -0,0 +1,271 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.2\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2014-01-15 22:31+0900\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../arch/cache/ram-cache.en.rst:22 +msgid "Ram Cache" +msgstr "" + +#: ../../arch/cache/ram-cache.en.rst:25 +msgid "New Ram Cache Algorithm (CLFUS)" +msgstr "" + +#: ../../arch/cache/ram-cache.en.rst:27 +msgid "" +"The new Ram Cache uses ideas from a number of cache replacement policies " +"and algorithms, including LRU, LFU, CLOCK, GDFS and 2Q, called CLFUS " +"(Clocked Least Frequently Used by Size). It avoids any patented algorithms " +"and includes the following features:" +msgstr "" + +#: ../../arch/cache/ram-cache.en.rst:29 +msgid "" +"Balances Recentness, Frequency and Size to maximize hit rate (not byte hit " +"rate)." +msgstr "" + +#: ../../arch/cache/ram-cache.en.rst:30 +msgid "" +"Is Scan Resistant and extracts robust hit rates even when the working set " +"does not fit in the Ram Cache." +msgstr "" + +#: ../../arch/cache/ram-cache.en.rst:31 +msgid "" +"Supports compression at 3 levels fastlz, gzip(libz), and xz(liblzma). " +"Compression can be moved to another thread." +msgstr "" + +#: ../../arch/cache/ram-cache.en.rst:32 +msgid "" +"Has very low CPU overhead, only little more than a basic LRU. Rather than " +"using an O(lg n) heap, it uses a probabilistic replacement policy for O(1) " +"cost with low C." +msgstr "" + +#: ../../arch/cache/ram-cache.en.rst:33 +msgid "" +"Has relatively low memory overhead of approximately 200 bytes per object in " +"memory." 
+msgstr "" + +#: ../../arch/cache/ram-cache.en.rst:35 +msgid "" +"The rational for emphasizing hit rate over byte hit rate is that the " +"overhead of pulling more bytes from secondary storage is low compared to " +"the cost of a request." +msgstr "" + +#: ../../arch/cache/ram-cache.en.rst:37 +msgid "" +"The Ram Cache consists of an object hash fronting 2 LRU/CLOCK lists and a " +"\"Seen\" hash table. The first \"Cached\" list contains objects in memory " +"while the second contains a \"History\" of objects which have either " +"recently been in memory or are being considered for keeping in memory. The " +"\"Seen\" hash table is used to make the algorithm scan resistant." +msgstr "" + +#: ../../arch/cache/ram-cache.en.rst:39 +msgid "The list entries record the following information:" +msgstr "" + +#: ../../arch/cache/ram-cache.en.rst:41 +msgid "key - 16 byte unique object identifier" +msgstr "" + +#: ../../arch/cache/ram-cache.en.rst:42 +msgid "" +"auxkeys - 8 bytes worth of version number (in our system the block in the " +"partition). When the version of an object changes old entries are purged " +"from the cache." +msgstr "" + +#: ../../arch/cache/ram-cache.en.rst:43 +msgid "hits - number of hits within this clock period" +msgstr "" + +#: ../../arch/cache/ram-cache.en.rst:44 +msgid "size - the size of the object in the cache" +msgstr "" + +#: ../../arch/cache/ram-cache.en.rst:45 +msgid "" +"len - the actual length of the object (differs from size because of " +"compression and padding)" +msgstr "" + +#: ../../arch/cache/ram-cache.en.rst:46 +msgid "compressed_len - the compressed length of the object" +msgstr "" + +#: ../../arch/cache/ram-cache.en.rst:47 +msgid "compressed (none, fastlz, libz, liblzma)" +msgstr "" + +#: ../../arch/cache/ram-cache.en.rst:48 +msgid "uncompressible (flag)" +msgstr "" + +#: ../../arch/cache/ram-cache.en.rst:49 +msgid "" +"copy - whether or not this object should be copied in and copied out (e.g. 
" +"HTTP HDR)" +msgstr "" + +#: ../../arch/cache/ram-cache.en.rst:50 +msgid "LRU link" +msgstr "" + +#: ../../arch/cache/ram-cache.en.rst:51 +msgid "HASH link" +msgstr "" + +#: ../../arch/cache/ram-cache.en.rst:52 +msgid "IOBufferData (smart point to the data buffer)" +msgstr "" + +#: ../../arch/cache/ram-cache.en.rst:55 +msgid "" +"The interface to the cache is Get and Put operations. Get operations check " +"if an object is in the cache and are called on a read attempt. The Put " +"operation decides whether or not to cache the provided object in memory. " +"It is called after a read from secondary storage." +msgstr "" + +#: ../../arch/cache/ram-cache.en.rst:58 +msgid "Seen Hash" +msgstr "" + +#: ../../arch/cache/ram-cache.en.rst:60 +msgid "" +"The Seen List becomes active after the Cached and History lists become full " +"after a cold start. The purpose is to make the cache scan resistant which " +"means that the cache state must not be effected at all by a long sequence " +"Get and Put operations on objects which are seen only once. This is " +"essential, without it not only would the cache be polluted, but it could " +"lose critical information about the objects that it cares about. It is " +"therefore essential that the Cache and History lists are not effected by " +"Get or Put operations on objects seen the first time. The Seen Hash " +"maintains a set of 16 bit hash tags, and requests which do not hit in the " +"object cache (are in the Cache List or History List) and do not match the " +"hash tag result in the hash tag begin updated but are otherwise ignored. " +"The Seen Hash is sized to approximately the number of objects in the cache " +"in order to match the number that are passed through it with the CLOCK rate " +"of the Cached and History Lists." +msgstr "" + +#: ../../arch/cache/ram-cache.en.rst:63 +msgid "Cached List" +msgstr "" + +#: ../../arch/cache/ram-cache.en.rst:65 +msgid "" +"The Cached list contains objects actually in memory. 
The basic operation " +"is LRU with new entries inserted into a FIFO (queue) and hits causing " +"objects to be reinserted. The interesting bit comes when an object is " +"being considered for insertion. First we check if the Object Hash to see " +"if the object is in the Cached List or History. Hits result in updating " +"the \"hit\" field and reinsertion. History hits result in the \"hit\" " +"field being updated and a comparison to see if this object should be kept " +"in memory. The comparison is against the least recently used members of " +"the Cache List, and is based on a weighted frequency::" +msgstr "" + +#: ../../arch/cache/ram-cache.en.rst:69 +msgid "" +"A new object must beat enough bytes worth of currently cached objects to " +"cover itself. Each time an object is considered for replacement the CLOCK " +"moves forward. If the History object has a greater value then it is " +"inserted into the Cached List and the replaced objects are removed from " +"memory and their list entries are inserted into the History List. If the " +"History object has a lesser value it is reinserted into the History List. " +"Objects considered for replacement (at least one) but not replaced have " +"their \"hits\" field set to zero and are reinserted into the Cached List. " +"This is the CLOCK operation on the Cached List." +msgstr "" + +#: ../../arch/cache/ram-cache.en.rst:72 +msgid "History List" +msgstr "" + +#: ../../arch/cache/ram-cache.en.rst:74 +msgid "" +"Each CLOCK the least recently used entry in the History List is dequeued " +"and if the \"hits\" field is not greater than 1 (it was hit at least once " +"in the History or Cached List) it is deleted, otherwise the \"hits\" is set " +"to zero and it is requeued on the History List." 
+msgstr "" + +#: ../../arch/cache/ram-cache.en.rst:77 +msgid "Compression/Decompression" +msgstr "" + +#: ../../arch/cache/ram-cache.en.rst:79 +msgid "" +"Compression is performed by a background operation (currently called as " +"part of Put) which maintains a pointer into the Cached List and runs toward " +"the head compressing entries. Decompression occurs on demand during a " +"Get. In the case of objects tagged \"copy\" the compressed version is " +"reinserted in the LRU since we need to make a copy anyway. Those not " +"tagged \"copy\" are inserted uncompressed in the hope that they can be " +"reused in uncompressed form. This is a compile time option and may be " +"something we want to change." +msgstr "" + +#: ../../arch/cache/ram-cache.en.rst:81 +msgid "" +"There are 3 algorithms and levels of compression (speed on 1 thread i7 " +"920) :" +msgstr "" + +#: ../../arch/cache/ram-cache.en.rst:83 +msgid "" +"fastlz: 173 MB/sec compression, 442 MB/sec decompression : basically free " +"since disk or network will limit first, ~53% final size" +msgstr "" + +#: ../../arch/cache/ram-cache.en.rst:84 +msgid "" +"libz: 55 MB/sec compression, 234 MB/sec decompression : almost free, " +"particularly decompression, ~37% final size" +msgstr "" + +#: ../../arch/cache/ram-cache.en.rst:85 +msgid "" +"liblzma: 3 MB/sec compression, 50 MB/sec decompression : expensive, ~27% " +"final size" +msgstr "" + +#: ../../arch/cache/ram-cache.en.rst:87 +msgid "" +"These are ballpark numbers, and your millage will vary enormously. JPEG " +"for example will not compress with any of these. The RamCache does detect " +"compression level and will declare something \"incompressible\" if it " +"doesn't get below 90% of the original size. This value is cached so that " +"the RamCache will not attempt to compress it again (at least as long as it " +"is in the history)." 
+msgstr "" diff --git a/doc/locale/ja/LC_MESSAGES/arch/cache/tier-storage.en.po b/doc/locale/ja/LC_MESSAGES/arch/cache/tier-storage.en.po new file mode 100644 index 00000000..aea6dae1 --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/arch/cache/tier-storage.en.po @@ -0,0 +1,224 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-11-08 17:57+0900\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../arch/cache/tier-storage.en.rst:20 +msgid "Tiered Storage Design" +msgstr "" + +#: ../../arch/cache/tier-storage.en.rst:26 +msgid "Introduction" +msgstr "" + +#: ../../arch/cache/tier-storage.en.rst:28 +msgid "" +"Tiered storage is an attempt to allow |TS| to take advantage of physical " +"storage with different properties. This design concerns only mechanism. " +"Policies to take advantage of these are outside of the scope of this " +"document. 
Instead we will presume an *oracle* which implements this policy " +"and describe the queries that must be answered by the oracle and the " +"effects of the answers." +msgstr "" + +#: ../../arch/cache/tier-storage.en.rst:33 +msgid "" +"Beyond avoiding question of tier policy the design is also intended to be " +"effectively identical to current operations for the case where there is " +"only one tier." +msgstr "" + +#: ../../arch/cache/tier-storage.en.rst:36 +msgid "" +"The most common case for tiers is an ordered list of tiers, where higher " +"tiers are presumed faster but more expensive (or more limited in capacity). " +"This is not required. It might be that different tiers are differentiated " +"by other properties (such as expected persistence). The design here is " +"intended to handle both cases." +msgstr "" + +#: ../../arch/cache/tier-storage.en.rst:46 +msgid "Configuration" +msgstr "" + +#: ../../arch/cache/tier-storage.en.rst:48 +msgid "" +"Each storage unit in :file:`storage.config` can be marked with a *quality* " +"value which is 32 bit number. Storage units that are not marked are all " +"assigned the same value which is guaranteed to be distinct from all " +"explicit values. The quality value is arbitrary from the point of view of " +"this design, serving as a tag rather than a numeric value. The user (via " +"the oracle) can impose what ever additional meaning is useful on this value " +"(rating, bit slicing, etc.). In such cases all volumes should be explicitly " +"assigned a value, as the default / unmarked value is not guaranteed to have " +"any relationship to explicit values. The unmarked value is intended to be " +"useful in situations where the user has no interest in tiered storage and " +"so wants to let Traffic Server automatically handle all volumes as a single " +"tier." 
+msgstr "" + +#: ../../arch/cache/tier-storage.en.rst:58 +msgid "Operations" +msgstr "" + +#: ../../arch/cache/tier-storage.en.rst:60 +msgid "" +"After a client request is received and processed, volume assignment is " +"done. This would be changed to do volume assignment across all tiers " +"simultaneously. For each tier the oracle would return one of four values " +"along with a volume pointer." +msgstr "" + +#: ../../arch/cache/tier-storage.en.rst:63 +msgid "`READ`" +msgstr "" + +#: ../../arch/cache/tier-storage.en.rst:63 +msgid "The tier appears to have the object and can serve it." +msgstr "" + +#: ../../arch/cache/tier-storage.en.rst:66 +msgid "`WRITE`" +msgstr "" + +#: ../../arch/cache/tier-storage.en.rst:66 +msgid "" +"The object is not in this tier and should be written to this tier if " +"possible." +msgstr "" + +#: ../../arch/cache/tier-storage.en.rst:69 +msgid "`RW`" +msgstr "" + +#: ../../arch/cache/tier-storage.en.rst:69 +msgid "" +"Treat as `READ` if possible but if the object turns out to not in the cache " +"treat as `WRITE`." +msgstr "" + +#: ../../arch/cache/tier-storage.en.rst:72 +msgid "`NO_SALE`" +msgstr "" + +#: ../../arch/cache/tier-storage.en.rst:72 +msgid "Do not interact with this tier for this object." +msgstr "" + +#: ../../arch/cache/tier-storage.en.rst:74 +msgid "" +"The volume returned for the tier must be a volume with the corresponding " +"tier quality value. In effect the current style of volume assignment is " +"done for each tier, by assigning one volume out of all of the volumes of " +"the same quality and returning one of `RW` or `WRITE` depending on whether " +"the initial volume directory lookup succeeds. Note that as with current " +"volume assignment it is presumed this can be done from in memory structures " +"(no disk I/O required)." 
+msgstr "" + +#: ../../arch/cache/tier-storage.en.rst:79 +msgid "" +"If the oracle returns `READ` or `RW` for more than one tier, it must also " +"return an ordering for those tiers (it may return an ordering for all " +"tiers, ones that are not readable will be ignored). For each tier, in that " +"order, a read of cache storage is attempted for the object. A successful " +"read locks that tier as the provider of cached content. If no tier has a " +"successful read, or no tier is marked `READ` or `RW` then it is a cache " +"miss. Any tier marked `RW` that fails the read test is demoted to `WRITE`." +msgstr "" + +#: ../../arch/cache/tier-storage.en.rst:85 +msgid "" +"If the object is cached every tier that returns `WRITE` receives the object " +"to store in the selected volume (this includes `RW` returns that are " +"demoted to `WRITE`). This is a cache to cache copy, not from the origin " +"server. In this case tiers marked `RW` that are not tested for read will " +"not receive any data and will not be further involved in the request " +"processing." +msgstr "" + +#: ../../arch/cache/tier-storage.en.rst:90 +msgid "" +"For a cache miss, all tiers marked `WRITE` will receive data from the " +"origin server connection (if successful)." +msgstr "" + +#: ../../arch/cache/tier-storage.en.rst:92 +msgid "" +"This means, among other things, that if there is a tier with the object all " +"other tiers that are written will get a local copy of the object, the " +"origin server will not be used. In terms of implementation, currently a " +"cache write to a volume is done via the construction of an instance of " +"`CacheVC` which recieves the object stream. For tiered storage the same " +"thing is done for each target volume." +msgstr "" + +#: ../../arch/cache/tier-storage.en.rst:97 +msgid "" +"For cache volume overrides (e.g. via :file:`hosting.config`) this same " +"process is used except with only the volumes stripes contained within the " +"specified cache volume." 
+msgstr "" + +#: ../../arch/cache/tier-storage.en.rst:102 +msgid "Copying" +msgstr "" + +#: ../../arch/cache/tier-storage.en.rst:104 +msgid "" +"It may be necessary to provide a mechanism to copy objects between tiers " +"outside of a client originated transaction. In terms of implementation this " +"is straight forward using `HttpTunnel` as if in a transaction only using a " +"`CacheVC` instance for both the producer and consumer. The more difficult " +"question is what event would trigger a possible copy. A signal could be " +"provided whenever a volume directory entry is deleted although it should be " +"noted that the object in question may have already been evicted when this " +"event happens." +msgstr "" + +#: ../../arch/cache/tier-storage.en.rst:112 +msgid "Additional Notes" +msgstr "" + +#: ../../arch/cache/tier-storage.en.rst:114 +msgid "" +"As an example use, it would be possible to have only one cache volume that " +"uses tiered storage for a particular set of domains using volume tagging. :" +"file:`hosting.config` would be used to direct those domains to the selected " +"cache volume. The oracle would check the URL in parallel and return " +"`NO_SALE` for the tiers in the target cache volume for other domains. For " +"the other tier (that of the unmarked storage units) the oracle would return " +"`RW` for the tier in all cases as that tier would not be queried for the " +"target domains." +msgstr "" + +#: ../../arch/cache/tier-storage.en.rst:40 +msgid "" +"The design presumes that if a user has multiple tiers of storage and an " +"ordering for those tiers, they will usually want content stored at one tier " +"level to also be stored at every other lower level as well, so that it does " +"not have to be copied if evicted from a higher tier." 
+msgstr "" diff --git a/doc/locale/ja/LC_MESSAGES/arch/hacking/index.en.po b/doc/locale/ja/LC_MESSAGES/arch/hacking/index.en.po new file mode 100644 index 00000000..f3fed527 --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/arch/hacking/index.en.po @@ -0,0 +1,42 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.2\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2014-02-05 23:44+0900\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../arch/hacking/index.en.rst:2 +msgid "Hacking" +msgstr "" + +#: ../../arch/hacking/index.en.rst:22 +msgid "Introduction" +msgstr "" + +#: ../../arch/hacking/index.en.rst:24 +msgid "" +"This is a documentation stub on how to hack Apache Traffic Server. Here we " +"try to document things such as how to write and run unit or regression " +"tests or how to inspect the state of the core with a debugger." 
+msgstr "" diff --git a/doc/locale/ja/LC_MESSAGES/arch/index.en.po b/doc/locale/ja/LC_MESSAGES/arch/index.en.po new file mode 100644 index 00000000..9aaf9b9f --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/arch/index.en.po @@ -0,0 +1,62 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-11-08 17:57+0900\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../arch/index.en.rst:22 +msgid "Introduction" +msgstr "導入" + +#: ../../arch/index.en.rst:30 +msgid "Contents:" +msgstr "内容:" + +#: ../../arch/index.en.rst:2 +msgid "Architecture and Hacking" +msgstr "アーキテクチャとハッキング" + +#: ../../arch/index.en.rst:24 +#, fuzzy +msgid "" +"The original architectural documents for Traffic Server were lost in the " +"transition to an open source project. The documents in this section are " +"provisional and were written based on the existing code. The purpose is to " +"have a high level description of aspects of Traffic Server to better inform " +"ongoing work." 
+msgstr "" +"Traffic Server のオリジナルのアーキテクチャに関するドキュメントは、オープ" +"ンソースプロジェクトへの移行時に失われました。このセクションのドキュメン" +"トは暫定的なものであり、既存のコードを元に書かれています。このドキュメン" +"トの目的は進行中の作業をより知らせるため、 Traffic Server の側面の高度な" +"説明をすることです。" + +#: ../../arch/index.en.rst:28 +msgid "" +"In the final section on \"hacking\" we try to document our approaches to " +"understanding and modifying the source." +msgstr "" +"最終章の\"ハッキング\"ではソースを理解し変更するためのアプローチについて" +"記載します。" diff --git a/doc/locale/ja/LC_MESSAGES/arch/proposals/hostdb.en.po b/doc/locale/ja/LC_MESSAGES/arch/proposals/hostdb.en.po new file mode 100644 index 00000000..d3648fcc --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/arch/proposals/hostdb.en.po @@ -0,0 +1,372 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-11-08 17:57+0900\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../arch/proposals/hostdb.en.rst:2 +msgid "Host Resolution" +msgstr "" + +#: ../../arch/proposals/hostdb.en.rst:22 +msgid "Introduction" +msgstr "" + +#: ../../arch/proposals/hostdb.en.rst:24 +msgid "" +"The current mechanism for resolving host names to IP addresses for Traffic " +"Server is contained the HostDB and DNS libraries. These take hostnames and " +"provide IP addresses for them." +msgstr "" + +#: ../../arch/proposals/hostdb.en.rst:27 +msgid "" +"The current implementation is generally considered inadequate, both from a " +"functionality point of view and difficulty in working with it in other " +"parts of Traffic Server. As Traffic Server is used in more complex " +"situtations this inadequacy presents increasing problems." +msgstr "" + +#: ../../arch/proposals/hostdb.en.rst:32 +msgid "Goals" +msgstr "" + +#: ../../arch/proposals/hostdb.en.rst:34 +msgid "" +"Updating the host name resolution (currently referred to as \"HostDB\") has " +"several functions goals" +msgstr "" + +#: ../../arch/proposals/hostdb.en.rst:36 +msgid "Enable additional processing layers to be easily added." 
+msgstr "" + +#: ../../arch/proposals/hostdb.en.rst:37 +msgid "Enable plugins to directly access the name resolution logic" +msgstr "" + +#: ../../arch/proposals/hostdb.en.rst:38 +msgid "Enable plugins to provide name resolution" +msgstr "" + +#: ../../arch/proposals/hostdb.en.rst:39 +msgid "Asynchronous (immediate resolve or callback on block)" +msgstr "" + +#: ../../arch/proposals/hostdb.en.rst:40 +msgid "" +"Minimize allocations -- in particular no allocations for cached resolutions" +msgstr "" + +#: ../../arch/proposals/hostdb.en.rst:41 +msgid "" +"Simplify interactions with the resolution, particularly with regard to " +"nameservers, origin server failover, and address family handling." +msgstr "" + +#: ../../arch/proposals/hostdb.en.rst:44 +msgid "" +"It is also necessary to support a number of specific features that are " +"either currently available or strongly desired." +msgstr "" + +#: ../../arch/proposals/hostdb.en.rst:46 +msgid "SplitDNS or its equivalent" +msgstr "" + +#: ../../arch/proposals/hostdb.en.rst:47 +msgid "Use of a hosts file (e.g. ``/etc/hosts``)" +msgstr "" + +#: ../../arch/proposals/hostdb.en.rst:48 +msgid "Simultaneous IPv4 and IPv6 queries" +msgstr "" + +#: ../../arch/proposals/hostdb.en.rst:49 +msgid "IP family control" +msgstr "" + +#: ../../arch/proposals/hostdb.en.rst:50 +msgid "" +"Negative caching * Server connection failures * Query failures * " +"Nameserver failures." 
+msgstr "" + +#: ../../arch/proposals/hostdb.en.rst:54 +msgid "Address validity time out control" +msgstr "" + +#: ../../arch/proposals/hostdb.en.rst:55 +msgid "Address round robin support" +msgstr "" + +#: ../../arch/proposals/hostdb.en.rst:56 +msgid "SRV record support (weighted records)" +msgstr "" + +#: ../../arch/proposals/hostdb.en.rst:57 +msgid "Nameserver round robin" +msgstr "" + +#: ../../arch/proposals/hostdb.en.rst:58 +msgid "Plugin access to nameserver data (add, remove, enumerate)" +msgstr "" + +#: ../../arch/proposals/hostdb.en.rst:59 +msgid "Plugin provision of resolvers." +msgstr "" + +#: ../../arch/proposals/hostdb.en.rst:60 +msgid "Hooks for plugin detection / recovery from resolution events." +msgstr "" + +#: ../../arch/proposals/hostdb.en.rst:62 +msgid "" +"One issue is persistence of the cached resolutions. This creates problems " +"for the current implementation (because of size limits it imposes on the " +"cached data) but also allows for quicker restarts in a busy environment." +msgstr "" + +#: ../../arch/proposals/hostdb.en.rst:66 +msgid "Basics" +msgstr "" + +#: ../../arch/proposals/hostdb.en.rst:68 +msgid "" +"The basic design is to separate the functionality into chainable layers so " +"that a resolver with the desired attributes can be assembled from those " +"layers. The core interface is that of a lazy iterator. This object returns " +"one of four results when asked for an address" +msgstr "" + +#: ../../arch/proposals/hostdb.en.rst:72 +msgid "An IP address" +msgstr "" + +#: ../../arch/proposals/hostdb.en.rst:73 +msgid "Done(no more addresses are available)" +msgstr "" + +#: ../../arch/proposals/hostdb.en.rst:74 +msgid "Wait(an address may be available in the future)" +msgstr "" + +#: ../../arch/proposals/hostdb.en.rst:75 +msgid "Fail (no address is available and none will be so in the future)" +msgstr "" + +#: ../../arch/proposals/hostdb.en.rst:77 +msgid "" +"Each layer (except the bottom) uses this API and also provides it. 
This " +"enables higher level logic such as the state machine to simply use the " +"resolver as a list without having to backtrack states in the case of " +"failures, or have special cases for different resolution sources." +msgstr "" + +#: ../../arch/proposals/hostdb.en.rst:81 +msgid "" +"To perform a resolution, a client creates a query object (potentially on " +"the stack), initializes it with the required data (at least the hostname) " +"and then starts the resolution. Methods on the query object allow its state " +"and IP address data to be accessed." +msgstr "" + +#: ../../arch/proposals/hostdb.en.rst:86 +msgid "Required Resolvers" +msgstr "" + +#: ../../arch/proposals/hostdb.en.rst:90 +msgid "Nameserver" +msgstr "" + +#: ../../arch/proposals/hostdb.en.rst:89 +msgid "" +"A bottom level resolver that directly queries a nameserver for DNS data. " +"This contains much of the functionality currently in the ``iocore/dns`` " +"directory." +msgstr "" + +#: ../../arch/proposals/hostdb.en.rst:94 +msgid "SplitDNS" +msgstr "" + +#: ../../arch/proposals/hostdb.en.rst:93 +msgid "" +"A resolver that directs requests to one of several resolvers. To emulate " +"current behavior these would be Nameserver instances." +msgstr "" + +#: ../../arch/proposals/hostdb.en.rst:98 +msgid "NameserverGroup" +msgstr "" + +#: ../../arch/proposals/hostdb.en.rst:97 +msgid "" +"A grouping mechanism for Nameserver instances that provides failover, round " +"robin, and ordering capabilities. It may be reasonable to merge this with " +"the SplitDNS resolver." +msgstr "" + +#: ../../arch/proposals/hostdb.en.rst:101 +msgid "HostFile" +msgstr "" + +#: ../../arch/proposals/hostdb.en.rst:101 +msgid "A resolver that uses a local file to resolve names." +msgstr "" + +#: ../../arch/proposals/hostdb.en.rst:105 +msgid "AddressCache" +msgstr "" + +#: ../../arch/proposals/hostdb.en.rst:104 +msgid "" +"A resolver that also has a cache for resolution results. 
It requires " +"another resolver instance to perform the actual resolution." +msgstr "" + +#: ../../arch/proposals/hostdb.en.rst:109 +msgid "Preloaded" +msgstr "" + +#: ../../arch/proposals/hostdb.en.rst:108 +msgid "" +"A resolver that can contain one or more explicitly set IP addresses which " +"are returned. When those are exhausted it falls back to another resolver." +msgstr "" + +#: ../../arch/proposals/hostdb.en.rst:112 +msgid "Configuration" +msgstr "" + +#: ../../arch/proposals/hostdb.en.rst:114 +msgid "" +"To configuration the resolution, each resolver would be assigned a tag. It " +"is not, however, sufficient to simply provide the list of resolver tags " +"because some resolvers require additional configuration. Unfortunately this " +"will likely require a separate configuration file outside of :file:`records." +"config`, although we would be able to remove :file:`splitdns.config`. In " +"this case we would need chain start / end markers around a list of resolver " +"tags. Each tag would the be able to take additional resolver configuration " +"data. For instance, for a SplitDNS resolver the nameservers." +msgstr "" + +#: ../../arch/proposals/hostdb.en.rst:121 +msgid "Examples" +msgstr "" + +#: ../../arch/proposals/hostdb.en.rst:123 +msgid "" +"Transparent operations would benefit from the *Preloaded* resolver. This " +"would be loaded with the origin host address provided by the client " +"connection. This could be done early in processing and then no more logic " +"would be required to skip DNS processing as it would happen without " +"additional action by the state machine. It would handle the problem of de " +"facto denial of service if an origin server becomes unavailable in that " +"configuration, as *Preloaded* would switch to alternate addresses " +"automatically." 
+msgstr "" + +#: ../../arch/proposals/hostdb.en.rst:129 +msgid "" +"Adding host file access would be easier as well, as it could be done in a " +"much more modular fashion and then added to the stack at configuration " +"time. Whether such addresses were cached would be controlled by chain " +"arrangement rather yet more configuration knobs." +msgstr "" + +#: ../../arch/proposals/hostdb.en.rst:133 +msgid "" +"The default configuration would be *Preloaded* : *AddressCache* : " +"*Nameserver*." +msgstr "" + +#: ../../arch/proposals/hostdb.en.rst:135 +msgid "" +"In all cases the state machine makes requests against the request object to " +"get IP addresses as needed." +msgstr "" + +#: ../../arch/proposals/hostdb.en.rst:138 +msgid "Issues" +msgstr "" + +#: ../../arch/proposals/hostdb.en.rst:141 +msgid "Request object allocation" +msgstr "" + +#: ../../arch/proposals/hostdb.en.rst:143 +msgid "" +"The biggest hurdle is being able to unwind a resolver chain when a block is " +"encountered. There are some ways to deal with this." +msgstr "" + +#: ../../arch/proposals/hostdb.en.rst:145 +msgid "" +"1) Set a maximum resolver chain length and declare the request instance so " +"that there is storage for state for that many resolvers. If needed and " +"additional value of maximum storage per chain could be set as well. The " +"expected number of elements in a chain is expected to be limited, 10 would " +"likely be a reaosnable limit. If settable at source configuration time this " +"should be sufficient." +msgstr "" + +#: ../../arch/proposals/hostdb.en.rst:150 +msgid "" +"2) Embed class allocators in resolver chains and mark the top / outermost / " +"first resolver. The maximum state size for a resolution can be calculated " +"when the chain is created and then the top level resolver can use an " +"allocation pool to efficiently allocate request objects. This has an " +"advantage that with a wrapper class the request object can be passed along " +"cheaply. 
Whether that's an advantage in practice is unclear." +msgstr "" + +#: ../../arch/proposals/hostdb.en.rst:156 +msgid "Plugin resolvers" +msgstr "" + +#: ../../arch/proposals/hostdb.en.rst:158 +msgid "" +"If plugins can provide resolvers, how can these can integrated in to " +"existing resolver chains for use by the HTTP SM for instance?" +msgstr "" + +#: ../../arch/proposals/hostdb.en.rst:162 +msgid "Feedback" +msgstr "" + +#: ../../arch/proposals/hostdb.en.rst:164 +msgid "" +"It should be possible for a client to provide feedback about addresses (e." +"g., the origin server at this address is not available). Not all resolvers " +"will handle feedback but some will and that must be possible." +msgstr "" + +#: ../../arch/proposals/hostdb.en.rst:167 +msgid "" +"Related to this is that caching resolvers (such as *AddressCache*) must be " +"able to iterator over all resolved addresses even if their client does not " +"ask for them. In effect they must background fill the address data." +msgstr "" diff --git a/doc/locale/ja/LC_MESSAGES/glossary.en.po b/doc/locale/ja/LC_MESSAGES/glossary.en.po new file mode 100644 index 00000000..a8422984 --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/glossary.en.po @@ -0,0 +1,284 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-11-08 17:57+0900\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../glossary.en.rst:20 +msgid "Glossary" +msgstr "用語集" + +#: ../../glossary.en.rst:49 +msgid "cache span" +msgstr "キャッシュスパン" + +#: ../../glossary.en.rst:51 +msgid "" +"The physical storage described by a single line in :file:`storage.config`." +msgstr ":file:`storage.config` の各行に記述された物理ストレージです。" + +#: ../../glossary.en.rst:44 +msgid "cache stripe" +msgstr "キャッシュストライプ" + +#: ../../glossary.en.rst:39 +msgid "cache volume" +msgstr "キャッシュボリューム" + +#: ../../glossary.en.rst:41 +msgid "" +"A user defined unit of persistent storage for the cache. Cache volumes are " +"defined in :file:`volume.config`. A cache volume is by default spread " +"across :term:`cache span`\\ s to increase robustness. Each section of a " +"cache volume on a specific cache span is a :term:`cache stripe`." +msgstr "" +"ユーザがキャッシュとして定義した永続ストレージの単位です。キャッシュボリュー" +"ムは :file:`volume.config` で定義されます。キャッシュボリュームは頑強性を高" +"めるため :term:`キャッシュスパン`\\ を跨いで分散されます。特定のキャッシュス" +"パンにおけるキャッシュボリュームの各セクションは :term:`キャッシュストライプ" +"` です。" + +#: ../../glossary.en.rst:24 +msgid "continuation" +msgstr "継続" + +#: ../../glossary.en.rst:26 +msgid "" +"A callable object that contains state. These are are mechanism used by " +"Traffic Server to implement callbacks and continued computations. Continued " +"computations are critical to efficient processing of traffic because by " +"avoiding any blocking operations that wait on external events. In any such " +"case a continuation is used so that other processing can continue until the " +"external event occurs. 
At that point the continuation is invoked to " +"continue the suspended processing. This can be considered similar to co-" +"routines." +msgstr "" +"状態を持った呼び出し可能なオブジェクトです。 コールバックと継続的な計算を実" +"装するために Traffic Server で用いられるメカニズムです。外部イベントを待つ操" +"作のブロッキングを避けることによりトラフィックを効率良く処理するため、継続的" +"な計算は重要です。そのような場合において、他の処理が外部イベントが起こるまで" +"続けられるようにするため継続が使用されます。その時点で継続は中断した処理を続" +"けるために発動されます。これはコルーチンに似た考えができます。" + +#: ../../glossary.en.rst:77 +msgid "revalidation" +msgstr "再確認" + +#: ../../glossary.en.rst:79 +msgid "" +"Verifying that a currently cached object is still valid. This is usually " +"done using an `If-Modified-Since `_ request which allows the origin server to " +"validate the content without resending the content." +msgstr "" +"現在のキャッシュされたオブジェクトがまだ有効か確認することです。これは通常オ" +"リジンサーバがコンテンツの再送信をすることなくコンテンツの確認を行うことを可" +"能にする `If-Modified-Since `_ リクエストを使って行われます。" + +#: ../../glossary.en.rst:31 +msgid "session" +msgstr "セッション" + +#: ../../glossary.en.rst:33 +msgid "" +"A single connection from a client to Traffic Server, covering all requests " +"and responses on that connection. A session starts when the client " +"connection opens, and ends when the connection closes." +msgstr "" +"全てのリクエストとレスポンスをカバーする、クライアントから Traffic Server へ" +"の単一のコネクションです。セッションはクライアントがコネクションをオープンし" +"た際に開始し、コネクションをクローズする際に終了します。" + +#: ../../glossary.en.rst:74 +msgid "storage unit" +msgstr "ストレージユニット" + +#: ../../glossary.en.rst:76 +msgid "Obsolete term for :term:`cache span`." +msgstr ":term:`キャッシュスパン` の旧称です。" + +#: ../../glossary.en.rst:35 +msgid "transaction" +msgstr "トランザクション" + +#: ../../glossary.en.rst:37 +msgid "" +"A client request and response, either from the origin server or from the " +"cache. A transaction begins when Traffic Server receives a request, and " +"ends when Traffic Server sends the response." 
+msgstr "" +"クライアントのリクエストと、オリジンサーバーもしくはキャッシュからレスポンス" +"です。トランザクションは Traffic Server がリクエストを受け付けた際に開始" +"し、 Traffic Server がレスポンスを送る際に終了します。" + +#: ../../glossary.en.rst:82 +msgid "write cursor" +msgstr "書込みカーソル" + +#: ../../glossary.en.rst:84 +msgid "The location in a :term:`cache stripe` where new data is written." +msgstr "新しいデータが書き込まれる :term:`キャッシュストライプ` の位置です。" + +#: ../../glossary.en.rst:68 +msgid "alternate" +msgstr "代替" + +#: ../../glossary.en.rst:70 +msgid "" +"A variant of a :term:`cache object`. This was originally created to handle " +"the `VARY mechanism `_ but has since been used for additional purposes. All " +"alternates of an object must be equivalent in some manner, that is they are " +"alternate forms of the same stream. The most common example is having " +"normal and compressed versions of the stream." +msgstr "" +":term:`キャッシュオブジェクト` の変形です。元々は `VARY メカニズム `_ を処理するために" +"作られましたが、以来他の目的のためにも使われています。オブジェクトの全ての代" +"替は何らかの方法で同質にならなければならず、同じストリームの形式に変化しま" +"す。最も一般的な例はストリームの通常版と圧縮版を持つことです。" + +#: ../../glossary.en.rst:97 +msgid "cache fragment" +msgstr "キャッシュフラグメント" + +#: ../../glossary.en.rst:99 +msgid "" +"The unit of storage in the cache. All reads from the cache always read " +"exactly one fragment. Fragments may be written in groups, but every write " +"is always an integral number of fragments. Each fragment has a " +"corresponding :term:`directory entry` which describes its location in the " +"cache storage." +msgstr "" +"キャッシュ内のストレージの単位です。キャッシュの全ての読込みは常にちょうど一" +"つのフラグメントを読み込みます。フラグメントはまとめて書き込まれるかも知れま" +"せんが、全ての書込みは常にフラグメントの整数になります。各フラグメントは " +"キャッシュストレージにおける自身の位置情報を持つ :term:`ディレクトリエントリ" +"` に対応しています。" + +#: ../../glossary.en.rst:56 +msgid "cache ID" +msgstr "キャッシュ ID" + +#: ../../glossary.en.rst:58 +msgid "" +"A 128 bit value used as a fixed sized identifier for an object in the " +"cache. This is computed from the :term:`cache key` using the `MD5 hashing " +"function `_." 
+msgstr "" +"キャッシュのオブジェクトの固定サイズの識別子として使用される 128 ビットの値" +"です。 `MD5 ハッシュ関数 `_ を" +"用いて :term:`キャッシュキー` から計算されます。" + +#: ../../glossary.en.rst:52 +msgid "cache key" +msgstr "キャッシュキー" + +#: ../../glossary.en.rst:54 +msgid "" +"A byte sequence that is a globally unique identifier for an :term:`object " +"` in the cache. By default the URL for the object is used." +msgstr "" +"キャッシュの :term:`オブジェクト <キャッシュオブジェクト>` の大域的にユニー" +"クな識別子のバイト列です。デフォルトではオブジェクトの URL が使用されます。" + +#: ../../glossary.en.rst:64 +msgid "cache object" +msgstr "キャッシュオブジェクト" + +#: ../../glossary.en.rst:66 +msgid "" +"The minimal self contained unit of data in the cache. Cache objects are the " +"stored version of equivalent content streams from an origin server. A " +"single object can have multiple variants called :term:`alternates " +"`." +msgstr "" +"キャッシュのデータの最小の自己完結した単位です。キャッシュオブジェクトはオリ" +"ジンサーバーからのコンテンツストリームと等価な保存されたバージョンです。単一" +"のオブジェクトは :term:`代替 <代替>` と呼ばれる複数の変形を持ち得ます。" + +#: ../../glossary.en.rst:46 +msgid "" +"A homogenous persistent store for the cache in a single :term:`cache span`. " +"A stripe always resides entirely on a single physical device and is treated " +"as an undifferentiated span of bytes. This is the smallest independent unit " +"of storage." +msgstr "" +"単一の :term:`キャッシュスパン` におけるキャッシュの同質な永続ストアです。ス" +"トライプは常に一つの物理デバイスに全体的に置かれ、区別されないバイト列のスパ" +"ンとして扱われます。これはストレージの最小の独立した単位です。" + +#: ../../glossary.en.rst:60 +msgid "cache tag" +msgstr "キャッシュタグ" + +#: ../../glossary.en.rst:62 +msgid "" +"The bottom few bits (12 currently) of the :term:`cache ID`. This is used in " +"the :ref:`cache directory ` for a preliminary identity " +"check before going to disk." +msgstr "" +":term:`キャッシュ ID` の下位数ビット(現在は 12)です。ディスクへアクセスす" +"る前に予備的な識別チェックを行うために :ref:`キャッシュディレクトリ ` で使用されます。" + +#: ../../glossary.en.rst:90 +msgid "directory bucket" +msgstr "ディレクトリバケット" + +#: ../../glossary.en.rst:92 +msgid "" +"A contiguous fixed sized group of :term:`directory entries `. 
This is used for hash bucket maintenance optimization." +msgstr "" +"隣接した :term:`ディレクトリエントリ <ディレクトリエントリ>` の固定サイズの" +"グループです。ハッシュバケットのメンテナンスの最適化に使用されます。" + +#: ../../glossary.en.rst:94 +msgid "directory entry" +msgstr "ディレクトリエントリ" + +#: ../../glossary.en.rst:96 +msgid "An in memory entry that describes a :term:`cache fragment`." +msgstr ":term:`キャッシュフラグメント` を指すメモリに置かれるエントリです。" + +#: ../../glossary.en.rst:85 +msgid "directory segment" +msgstr "ディレクトリセグメント" + +#: ../../glossary.en.rst:87 +msgid "" +"A contiguous group of :term:`buckets `. Each :term:`cache " +"stripe` has a set of segments all of which have the same number of buckets, " +"although the number of buckets per segment can vary between cache stripes. " +"Segments are administrative in purpose to minimize the size of free list " +"and hash bucket pointers." +msgstr "" +"隣接した :term:`バケット <ディレクトリバケット>` のグループです。セグメント" +"あたりのバケット数はキャッシュストライプの間で変化する可能性がありますが、" +"各 :term:`キャッシュストライプ` は同じ数のバケットを持つ全てのセグメントの" +"セットを持ちます。セグメントはフリーリストとハッシュバケットポインタのサイズ" +"を最小限にするため管理できます。" diff --git a/doc/locale/ja/LC_MESSAGES/index.po b/doc/locale/ja/LC_MESSAGES/index.po new file mode 100644 index 00000000..cf740003 --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/index.po @@ -0,0 +1,73 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-11-08 17:57+0900\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../index.rst:2 +msgid "Apache Traffic Server" +msgstr "" + +#: ../../index.rst:22 +msgid "" +"Apache Traffic Server™ speeds Internet access, enhances website " +"performance, and delivers unprecedented web hosting capabilities." +msgstr "" +"Apache Traffic Server™ はインターネットアクセスを加速させ、ウェブサイトのパ" +"フォーマンスを高め、かつて無いウェブホスティング性能を提供します。" + +#: ../../index.rst:26 +msgid "What Is Apache Traffic Server?" +msgstr "Apache Traffic Server とは?" + +#: ../../index.rst:28 +msgid "" +"Traffic Server is a high-performance web proxy cache that improves network " +"efficiency and performance by caching frequently-accessed information at " +"the edge of the network. This brings content physically closer to end " +"users, while enabling faster delivery and reduced bandwidth use. Traffic " +"Server is designed to improve content delivery for enterprises, Internet " +"service providers (ISPs), backbone providers, and large intranets by " +"maximizing existing and available bandwidth." 
+msgstr "" +"Traffic Server はハイパフォーマンスなウェブプロキシーキャッシュであり、それ" +"はネットワークの端で頻繁にアクセスされる情報をキャッシュすることによりネット" +"ワーク効率とパフォーマンスを改善します。これは物理的に近いエンドユーザーにコ" +"ンテンツを運ぶ一方で、素早い提供と帯域使用量の削減を可能にします。Traffic " +"Server は商用のコンテンツ配信やインターネットサービスプロバイダー( ISP )や" +"バックボーンプロバイダーや巨大なイントラネットを現行の利用可能な帯域を最大化" +"することで改善するようにデザインされています。" + +#: ../../index.rst:50 +msgid "Indices and tables" +msgstr "Indices and tables" + +#: ../../index.rst:52 +msgid ":ref:`genindex`" +msgstr ":ref:`genindex`" + +#: ../../index.rst:53 +msgid ":ref:`search`" +msgstr ":ref:`search`" diff --git a/doc/locale/ja/LC_MESSAGES/reference/api/TSAPI.en.po b/doc/locale/ja/LC_MESSAGES/reference/api/TSAPI.en.po new file mode 100644 index 00000000..f98e0fb6 --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/reference/api/TSAPI.en.po @@ -0,0 +1,320 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-11-08 17:57+0900\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../reference/api/TSAPI.en.rst:21 +msgid "TSAPI" +msgstr "TSAPI" + +#: ../../reference/api/TSAPI.en.rst:24 +msgid "Synopsis" +msgstr "概要" + +#: ../../reference/api/TSAPI.en.rst:25 +msgid "`#include `" +msgstr "`#include `" + +#: ../../reference/api/TSAPI.en.rst:26 +msgid "`#include `" +msgstr "`#include `" + +#: ../../reference/api/TSAPI.en.rst:29 +msgid "Description" +msgstr "解説" + +#: ../../reference/api/TSAPI.en.rst:30 +msgid "" +"The Apache Traffic Server API enables you to create plugins, using the C " +"programming language, that customize the behavior of your Traffic Server " +"installation." +msgstr "" +"Apache Traffic Server API は C 言語を使ってインストール済みの Traffic " +"Server の振る舞いをカスタマイズするプラグインの作成を可能にします。" + +#: ../../reference/api/TSAPI.en.rst:34 +msgid "" +"Traffic Server enables sophisticated caching and processing of web-related " +"traffic, such as DNS and HTTP requests and responses. Traffic Server itself " +"consists of an event-driven loop that can be simplified as follows::" +msgstr "" +"Traffic Server は洗練されたキャッシュと DNS や HTTP のリクエストやレスポンス" +"のような Web 関連トラフィックの処理を可能とします。Traffic Server 自体は次の" +"ように簡素化されたイベントドリブンループから成っています。" + +#: ../../reference/api/TSAPI.en.rst:44 +msgid "" +"You compile your plugin source code to create a shared library that Traffic " +"Server loads when it is started. Your plugin contains callback functions " +"that are registered for specific Traffic Server events. When Traffic Server " +"needs to process an event, it invokes any and all call-back functions " +"you've registered for that event type." 
+msgstr "" +"Traffic Server が動作を開始するときに読み込まれる共有ライブラリを作るために" +"プラグインのソースをコンパイルします。プラグインは特定の Traffic Server イベ" +"ントに登録されるコールバック関数を含んでいます。Traffic Server がイベントの" +"処理を必要とすると、そのイベントタイプに登録したあらゆるコールバック関数が呼" +"び出されます。" + +#: ../../reference/api/TSAPI.en.rst:51 +msgid "Possible uses for plugins include the following:" +msgstr "プラグインは次の使い方ができます。" + +#: ../../reference/api/TSAPI.en.rst:53 +msgid "" +"HTTP processing plugins can filter, blacklist, authorize users or transform " +"content." +msgstr "" +"HTTP 処理プラグインはフィルター、ブラックリスト、ユーザー認証、コンテンツの" +"変換ができます。" + +#: ../../reference/api/TSAPI.en.rst:54 +msgid "" +"Protocol plugins can enable Traffic Server to proxy-cache new protocol " +"content." +msgstr "" +"プロトコルプラグインは Traffic Server が新しいプロトコルコンテンツをプロキ" +"シーキャッシュできるようにします。" + +#: ../../reference/api/TSAPI.en.rst:55 +msgid "" +"A blacklisting plugin denies attempts to access web sites that are off-" +"limits." +msgstr "" +"ブラックリストプラグインは進入禁止の Web サイトへのアクセスを拒絶します。" + +#: ../../reference/api/TSAPI.en.rst:56 +msgid "Append transform plugins add data to HTTP response content." +msgstr "追加変換プラグインは HTTP レスポンスコンテンツにデータを追加します。" + +#: ../../reference/api/TSAPI.en.rst:57 +msgid "An image conversion plugin transforms JPEG images to GIF images." +msgstr "画像変換プラグインは JPEG 画像を GIF 画像に変換します。" + +#: ../../reference/api/TSAPI.en.rst:58 +msgid "" +"Compression plugins send response content to a compression server that " +"compresses the data (alternatively, a compression library local to the " +"Traffic Server host machine could do the compression)." +msgstr "" +"圧縮プラグインはデータを圧縮する圧縮サーバーにレスポンスデータを送信します " +"(もしくは、Traffic Server のホストマシンにある圧縮ライブラリが圧縮を行うかも" +"しれません) 。" + +#: ../../reference/api/TSAPI.en.rst:61 +msgid "" +"Authorization plugins check a user's permissions to access particular web " +"sites. The plugin could consult a local authorization program or send " +"queries to an authorization server." 
+msgstr "" +"認証プラグインは特定の Web サイトにアクセスするためのユーザーの権限を確認し" +"ます。このプラグインはローカルの認証プログラムに問い合わせたり、認証サーバー" +"に問い合わせを送信したりするかもしれません。" + +#: ../../reference/api/TSAPI.en.rst:64 +msgid "" +"A plugin that gathers client information from request headers and enters " +"this information in a database." +msgstr "" +"リクエストヘッダーからクライアントの情報を収集しその情報をデータベースに入力" +"するプラグイン。" + +#: ../../reference/api/TSAPI.en.rst:66 +msgid "" +"A protocol plugin listen for specific protocol requests on a designated " +"port and then uses Traffic Server's proxy server and cache to serve client " +"requests." +msgstr "" +"プロトコルプラグインは指定されたポートへの特定のプロトコルのリクエストを受" +"け、クライアントのリクエストに応えるために Traffic Server のプロキシーサー" +"バーとキャッシュを使用します。" + +#: ../../reference/api/TSAPI.en.rst:71 +msgid "Naming conventions" +msgstr "命名規則" + +#: ../../reference/api/TSAPI.en.rst:73 +msgid "The Traffic Server API adheres to the following naming conventions:" +msgstr "Traffic Server API は次の命名規則に従います。" + +#: ../../reference/api/TSAPI.en.rst:75 +msgid "" +"The TS prefix is used for all function and variable names defined in the " +"Traffic Server API. For example, :data:`TS_EVENT_NONE`, :type:`TSMutex`, " +"and :func:`TSContCreate`." +msgstr "" +"TS プレフィックスは Traffic Server API のすべての関数と変数の名前に使用され" +"ます。例えば、:data:`TS_EVENT_NONE` 、:type:`TSMutex` 、:func:" +"`TSContCreate` となります。" + +#: ../../reference/api/TSAPI.en.rst:78 +msgid "" +"Enumerated values are always written in all uppercase letters. For " +"example, :data:`TS_EVENT_NONE` and :data:`TS_VC_CLOSE_ABORT`." +msgstr "" +"列挙された値は常にすべて大文字で書かれます。例えば、:data:`TS_EVENT_NONE` " +"や :data:`TS_VC_CLOSE_ABORT` となります。" + +#: ../../reference/api/TSAPI.en.rst:80 +msgid "" +"Constant values are all uppercase; enumerated values can be seen as a " +"subset of constants. For example, :data:`TS_URL_SCHEME_FILE` and :data:" +"`TS_MIME_FIELD_ACCEPT`." 
+msgstr "" +"定数はすべて大文字です。列挙された値は定数の一部に見えるかもしれません。例え" +"ば、:data:`TS_URL_SCHEME_FILE` や :data:`TS_MIME_FIELD_ACCEPT` となります。" + +#: ../../reference/api/TSAPI.en.rst:83 +msgid "" +"The names of defined types are mixed-case. For example, :type:`TSHttpSsn` " +"and :func:`TSHttpTxn`. :func:`TSDebug`" +msgstr "" +"定義された型の名前は大文字小文字が混在します。例えば、:type:`TSHttpSsn` 、:" +"func:`TSHttpTxn` 、:func:`TSDebug` となります。" + +#: ../../reference/api/TSAPI.en.rst:85 +msgid "" +"Function names are mixed-case. For example, :func:`TSUrlCreate` and :func:" +"`TSContDestroy`." +msgstr "" +"関数名は混在になります。例えば、:func:`TSUrlCreate` や :func:" +"`TSContDestroy` となります。" + +#: ../../reference/api/TSAPI.en.rst:87 +msgid "" +"Function names use the following subject-verb naming style: TS--" +", where goes from general to specific. This makes it easier " +"to determine what a function does by reading its name. For example, the " +"function to retrieve the password field (the specific subject) from a URL " +"(the general subject) is :func:`TSUrlPasswordGet`." +msgstr "" +"関数名は次の 主語-動詞 命名スタイルを使用します。TS-<主語>-<動詞> 、<主語> " +"の部分は一般的なものから限定的なものにしていきます。これはその名前を読むこと" +"で関数が何をするのかを分かりやすくします。例えば、URL (一般的な主語) からパ" +"スワードフィールド (限定的な主語) を取得する関数は :func:`TSUrlPasswordGet` " +"となります。" + +#: ../../reference/api/TSAPI.en.rst:93 +msgid "" +"Common verbs like Create, Destroy, Get, Set, Copy, Find, Retrieve, Insert, " +"Remove, and Delete are used only when appropriate." +msgstr "" +"Create 、Destroy 、Get 、Set 、Copy 、Find 、Retrieve 、Insert 、Remove 、" +"Delete のような一般的な動詞はそれが適切な場合にのみ使用されます。" + +#: ../../reference/api/TSAPI.en.rst:97 +msgid "Plugin loading and configuration" +msgstr "プラグインの読み込みと設定" + +#: ../../reference/api/TSAPI.en.rst:99 +msgid "" +"When Traffic Server is first started, it consults the plugin.config file to " +"determine the names of all shared plugin libraries that need to be loaded. 
" +"The plugin.config file also defines arguments that are to be passed to each " +"plugin's initialization function, :func:`TSPluginInit`. The :file:`records." +"config` file defines the path to each plugin shared library." +msgstr "" +"Traffic Server が最初にスタートするとき、読み込まれる必要のあるすべての共有" +"ライブラリの名前を決定するために plugin.config ファイルを参照します。 " +"plugin.config ファイルは個々のプラグインの初期化関数 :func:`TSPluginInit` に" +"渡される引数も定義します。:file:`records.config` ファイルは個々のプラグイン" +"共有ライブラリのパスを定義します。" + +#: ../../reference/api/TSAPI.en.rst:106 +msgid "" +"The sample :file:`plugin.config` file below contains a comment line, a " +"blank line, and two plugin configurations::" +msgstr "" +"以下の :file:`plugin.config` ファイルのサンプルはコメント行、空行、二つのプ" +"ラグインの設定から成っています。" + +#: ../../reference/api/TSAPI.en.rst:113 +msgid "" +"Each plugin configuration in the :file:`plugin.config` file resembles a " +"UNIX or DOS shell command; each line in :file:`plugin.config` cannot exceed " +"1023 characters." +msgstr "" +":file:`plugin.config` ファイル内の個々のプラグインの設定は UNIX または DOS " +"のシェルコマンドに似ています。:file:`plugin.config` 内の各行は 1023 文字を超" +"えることはできません。" + +#: ../../reference/api/TSAPI.en.rst:117 +msgid "" +"The first plugin configuration is for a plugin named my-plugin.so. It " +"contains three arguments that are to be passed to that plugin's " +"initialization routine. The second configuration is for a plugin named some-" +"plugin.so; it contains three arguments. The last argument, $proxy.config." +"http.cache.on, is actually a configuration variable. Traffic Server will " +"look up the specified configuration variable and substitute its value." +msgstr "" +"最初のプラグインの設定は my-plugin.so と名付けられたプラグインのためのもので" +"す。これはそのプラグインの初期化ルーチンに渡される三つの引数を含んでいます。" +"二番目の設定は some-plugin.so と名付けられたプラグインのためのものです。これ" +"は三つの引数を含んでいます。その最後の引数である $proxy.config.http.cache." 
+"on は実は設定変数です。Traffic Server は指定された設定変数を参照し、その値で" +"置き換えます。" + +#: ../../reference/api/TSAPI.en.rst:125 +msgid "" +"Plugins are loaded and initialized by Traffic Server in the order they " +"appear in the :file:`plugin.config` file." +msgstr "" +"プラグインは :file:`plugin.config` ファイルに出てくる順番で Traffic Server " +"に読み込まれ初期化されます。" + +#: ../../reference/api/TSAPI.en.rst:129 +msgid "Plugin initialization" +msgstr "プラグインの初期化" + +#: ../../reference/api/TSAPI.en.rst:131 +msgid "" +"Each plugin must define an initialization function named :func:" +"`TSPluginInit` that Traffic Server invokes when the plugin is loaded. :func:" +"`TSPluginInit` is commonly used to read configuration information and " +"register hooks for event notification." +msgstr "" +"各プラグインはプラグインが読み込まれたときに Traffic Server が呼び出す :" +"func:`TSPluginInit` という初期化関数を定義していなければなりません。:func:" +"`TSPluginInit` は一般的に設定情報を読み込みイベント通知のフックを登録するた" +"めに使用されます。" + +#: ../../reference/api/TSAPI.en.rst:138 +msgid "Files" +msgstr "ファイル" + +#: ../../reference/api/TSAPI.en.rst:139 +msgid "" +":file:`{CONFIG_DIR}/plugin.config`, :file:`{CONFIG_DIR}/records.config`" +msgstr "" +":file:`{CONFIG_DIR}/plugin.config`, :file:`{CONFIG_DIR}/records.config`" + +#: ../../reference/api/TSAPI.en.rst:142 +msgid "See also" +msgstr "参考" + +#: ../../reference/api/TSAPI.en.rst:143 +msgid ":manpage:`TSPluginInit(3ts)`" +msgstr ":manpage:`TSPluginInit(3ts)`" diff --git a/doc/locale/ja/LC_MESSAGES/reference/api/TSDebug.en.po b/doc/locale/ja/LC_MESSAGES/reference/api/TSDebug.en.po new file mode 100644 index 00000000..1e7c210b --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/reference/api/TSDebug.en.po @@ -0,0 +1,117 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-11-08 17:57+0900\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../reference/api/TSDebug.en.rst:21 +msgid "TSDebug" +msgstr "TSDebug" + +#: ../../reference/api/TSDebug.en.rst:24 +msgid "Synopsis" +msgstr "概要" + +#: ../../reference/api/TSDebug.en.rst:25 +msgid "`#include `" +msgstr "`#include `" + +#: ../../reference/api/TSDebug.en.rst:39 +msgid "Description" +msgstr "解説" + +#: ../../reference/api/TSDebug.en.rst:41 +msgid "" +":func:`TSError` is similar to :func:`printf` except that instead of writing " +"the output to the C standard output, it writes output to the Traffic Server " +"error log." +msgstr "" +":func:`TSError` は C 標準出力に書き込む代わりに Traffic Server のエラーログ" +"に書き込むことを除けば :func:`printf` に似ています。" + +#: ../../reference/api/TSDebug.en.rst:45 +msgid "" +":func:`TSDebug` is the same as :func:`TSError` except that it only logs the " +"debug message if the given debug tag is enabled. It writes output to the " +"Traffic Server debug log." 
+msgstr "" +":func:`TSDebug` は与えられたデバッグタグが有効化されている場合にのみログメッ" +"セージを出力する点を除いて :func:`TSError` と同じです。" + +#: ../../reference/api/TSDebug.en.rst:49 +msgid "" +":func:`TSIsDebugTagSet` returns non-zero if the given debug tag is enabled." +msgstr "" +":func:`TSIsDebugTagSet` は与えられたデバッグタグが有効化されている場合に非ゼ" +"ロを返します。" + +#: ../../reference/api/TSDebug.en.rst:52 +msgid "" +"In debug mode, :macro:`TSAssert` Traffic Server to prints the file name, " +"line number and expression, and then aborts. In release mode, the " +"expression is not removed but the effects of printing an error message and " +"aborting are. :macro:`TSReleaseAssert` prints an error message and aborts " +"in both release and debug mode." +msgstr "" +"デバッグモードでは、:macro:`TSAssert` は Traffic Server にファイル名、行番" +"号、式を出力させ、処理を中断します。リリースモードでは、式の出力は残ります" +"が、エラーメッセージの出力と処理の中断は行われません。:macro:" +"`TSReleaseAssert` はリリースモードとデバッグモードのどちらでもエラーメッセー" +"ジの出力と処理の中断を行います。" + +#: ../../reference/api/TSDebug.en.rst:65 +msgid "Examples" +msgstr "例" + +#: ../../reference/api/TSDebug.en.rst:67 +msgid "" +"This example uses :func:`TSDebugSpecific` to log a message when a specific " +"debugging flag is enabled::" +msgstr "" +"この例は特定のデバッグフラグが有効になっている場合にメッセージをログに出力す" +"るために :func:`TSDebugSpecific` を使用しています。" + +#: ../../reference/api/TSDebug.en.rst:78 +msgid "See also" +msgstr "参考" + +#: ../../reference/api/TSDebug.en.rst:79 +msgid ":manpage:`TSAPI(3ts)`, :manpage:`printf(3)`" +msgstr ":manpage:`TSAPI(3ts)`, :manpage:`printf(3)`" + +#: ../../reference/api/TSDebug.en.rst:58 +msgid "" +":func:`TSDebugSpecific` emits a debug line even if the debug tag is turned " +"off, as long as debug flag is enabled. This can be used in conjunction " +"with :func:`TSHttpTxnDebugSet`, :func:`TSHttpSsnDebugSet`, :func:" +"`TSHttpTxnDebugGet` and :func:`TSHttpSsnDebugGet` to enable debugging on " +"specific session and transaction objects." 
+msgstr "" +":func:`TSDebugSpecific` はデバッグタグがオフにされていてもデバッグフラグ" +"が有効化されてさえいればデバッグ行を出力します。これは特定のセッションと" +"トランザクションオブジェクトでデバッグを有効化するために :func:" +"`TSHttpTxnDebugSet` 、:func:`TSHttpSsnDebugSet` 、:func:" +"`TSHttpTxnDebugGet` 、:func:`TSHttpSsnDebugGet` と組み合わせて使用される" +"ことがあります。" diff --git a/doc/locale/ja/LC_MESSAGES/reference/api/TSHttpHookAdd.en.po b/doc/locale/ja/LC_MESSAGES/reference/api/TSHttpHookAdd.en.po new file mode 100644 index 00000000..f47b23aa --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/reference/api/TSHttpHookAdd.en.po @@ -0,0 +1,132 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-11-08 17:57+0900\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../reference/api/TSHttpHookAdd.en.rst:22 +msgid "TSHttpHookAdd" +msgstr "TSHttpHookAdd" + +#: ../../reference/api/TSHttpHookAdd.en.rst:25 +msgid "Synopsis" +msgstr "概要" + +#: ../../reference/api/TSHttpHookAdd.en.rst:26 +msgid "`#include `" +msgstr "`#include `" + +#: ../../reference/api/TSHttpHookAdd.en.rst:33 +msgid "Description" +msgstr "解説" + +#: ../../reference/api/TSHttpHookAdd.en.rst:35 +msgid "" +"Hooks are points in Apache Traffic Server transaction HTTP processing where " +"plugins can step in and do some work. Registering a plugin function for " +"callback amounts to adding the function to a hook. You can register your " +"plugin to be called back for every single transaction, or for specific " +"transactions only." +msgstr "" +"フックは Apache Traffic Server の HTTP 処理の中でプラグインが入り込み何かで" +"きる場所です。コールバックのためのプラグインの関数を登録することはフックに関" +"数を追加することと同じです。一つずつすべてのトランザクション、もしくは特定の" +"トランザクションのみにコールバックされるプラグインを登録することができます。" + +#: ../../reference/api/TSHttpHookAdd.en.rst:41 +msgid "" +"HTTP :term:`transaction` hooks are set on a global basis using the " +"function :func:`TSHttpHookAdd`. This means that the continuation specified " +"as the parameter to :func:`TSHttpHookAdd` is called for every transaction. :" +"func:`TSHttpHookAdd` is typically called from :func:`TSPluginInit` or :func:" +"`TSRemapInit`." 
+msgstr "" +"HTTP :term:`transaction` フックは関数 :func:`TSHttpHookAdd` を使いグローバル" +"にセットされます。これは :func:`TSHttpHookAdd` のパラメーターとして指定され" +"た継続は全トランザクションで呼ばれるということを意味します。:func:" +"`TSHttpHookAdd` は一般的に :func:`TSPluginInit` もしくは :func:" +"`TSRemapInit` から呼ばれます。" + +#: ../../reference/api/TSHttpHookAdd.en.rst:47 +msgid "" +":func:`TSHttpSsnHookAdd` adds :arg:`contp` to the end of the list of HTTP :" +"term:`session` hooks specified by :arg:`id`. This means that :arg:`contp` " +"is called back for every transaction within the session, at the point " +"specified by the hook ID. Since :arg:`contp` is added to a session, it is " +"not possible to call :func:`TSHttpSsnHookAdd` from the plugin " +"initialization routine; the plugin needs a handle to an HTTP session." +msgstr "" +":func:`TSHttpSsnHookAdd` は :arg:`contp` を :arg:`id` で指定された HTTP :" +"term:`session` フックのリストの最後に追加します。これは :arg:`contp` がフッ" +"ク ID で指定された場所において、セッション中のすべてのトランザクションで呼び" +"出されることを意味します。:arg:`contp` がセッションに追加されるので、プラグ" +"インの初期化ルーチンから :func:`TSHttpSsnHookAdd` を呼び出すことはできませ" +"ん。プラグインは HTTP セッションを取り扱う必要があります。" + +#: ../../reference/api/TSHttpHookAdd.en.rst:55 +msgid "" +":func:`TSHttpTxnHookAdd` adds :arg:`contp` to the end of the list of HTTP " +"transaction hooks specified by :arg:`id`. Since :arg:`contp` is added to a " +"transaction, it is not possible to call :func:`TSHttpTxnHookAdd` from the " +"plugin initialization routine but only when the plugin has a handle to an " +"HTTP transaction." +msgstr "" +":func:`TSHttpTxnHookAdd` は :arg:`contp` を :arg:`id` で指定された HTTP トラ" +"ンザクションフックのリストの最後に追加します。:arg:`contp` がトランザクショ" +"ンに追加されるので、プラグインの初期化ルーチンから :func:`TSHttpTxnHookAdd` " +"を呼び出すことはできず、プラグインが HTTP トランザクションを取り扱うときのみ" +"呼び出せます。" + +#: ../../reference/api/TSHttpHookAdd.en.rst:63 +msgid "Return values" +msgstr "戻り値" + +#: ../../reference/api/TSHttpHookAdd.en.rst:65 +msgid "None. Adding hooks is always successful." 
+msgstr "無し。フックの追加は常に成功します。" + +#: ../../reference/api/TSHttpHookAdd.en.rst:68 +msgid "Examples" +msgstr "例" + +#: ../../reference/api/TSHttpHookAdd.en.rst:70 +msgid "" +"The following example demonstrates how to add global, session and " +"transaction hooks::" +msgstr "" +"次の例はどのようにグローバル、セッション、トランザクションフックを追加するか" +"を紹介しています。" + +#: ../../reference/api/TSHttpHookAdd.en.rst:110 +msgid "See also" +msgstr "参考" + +#: ../../reference/api/TSHttpHookAdd.en.rst:111 +msgid "" +":manpage:`TSAPI(3ts)`, :manpage:`TSContCreate(3ts)`, :manpage:" +"`TSLifecycleHookAdd(3ts)`" +msgstr "" +":manpage:`TSAPI(3ts)`, :manpage:`TSContCreate(3ts)`, :manpage:" +"`TSLifecycleHookAdd(3ts)`" diff --git a/doc/locale/ja/LC_MESSAGES/reference/api/TSHttpOverridableConfig.en.po b/doc/locale/ja/LC_MESSAGES/reference/api/TSHttpOverridableConfig.en.po new file mode 100644 index 00000000..3c93f5c5 --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/reference/api/TSHttpOverridableConfig.en.po @@ -0,0 +1,108 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-11-08 17:57+0900\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../reference/api/TSHttpOverridableConfig.en.rst:24 +msgid "TSHttpOverridableConfig" +msgstr "TSHttpOverridableConfig" + +#: ../../reference/api/TSHttpOverridableConfig.en.rst:27 +msgid "Synopsis" +msgstr "概要" + +#: ../../reference/api/TSHttpOverridableConfig.en.rst:28 +msgid "`#include `" +msgstr "`#include `" + +#: ../../reference/api/TSHttpOverridableConfig.en.rst:41 +msgid "Description" +msgstr "解説" + +#: ../../reference/api/TSHttpOverridableConfig.en.rst:43 +msgid "" +"Some of the values that are set in :file:`records.config` can be changed " +"for a specific transaction. It is important to note that these functions " +"change the configuration values stored for the transation, which is not " +"quite the same as changing the actual operating values of the transaction. " +"The critical effect is the value must be changed before it is used by the " +"transaction - after that, changes will not have any effect." +msgstr "" +":file:`records.config` に設定されるいくつかの値は特定のトランザクションに" +"おいて変更することが可能です。これらの関数はトランザクション用に格納された設" +"定値を変更すること、つまりトランザクションが実際に処理する際の値を変更するの" +"とまったく同じではないことに注意することが大切です。その重要な影響は値がトラ" +"ンザクションで使用される前に変更されていなければならないことです。使用された" +"後だった場合、変更は何の効果も持ちません。" + +#: ../../reference/api/TSHttpOverridableConfig.en.rst:48 +msgid "" +"All of the ``...Get`` functions store the internal value in the storage " +"indicated by the :arg:`value` argument. For strings :arg:`length*` will " +"receive the length of the string." 
+msgstr "" +"すべての ``...Get`` 関数は :arg:`value` 引数で示された記憶領域に内部値を格納" +"します。文字列では :arg:`length*` は文字列の長さを受け取ります。" + +#: ../../reference/api/TSHttpOverridableConfig.en.rst:50 +msgid "" +"The values are identified by the enumeration :type:" +"`TSOverridableConfigKey`. String values can be used indirectly by first " +"passing them to :func:`TSHttpTxnConfigFind` which, if the string matches an " +"overridable value, return the key and data type." +msgstr "" +"値は列挙型 :type:`TSOverridableConfigKey` で特定されます。文字列値は最初に :" +"func:`TSHttpTxnConfigFind` に渡すことで間接的に使用でき、この関数は文字列が" +"上書き可能な値に一致した場合にキーとデータ型を返します。" + +#: ../../reference/api/TSHttpOverridableConfig.en.rst:139 +msgid "Examples" +msgstr "例" + +#: ../../reference/api/TSHttpOverridableConfig.en.rst:141 +msgid "" +"Enable :ref:`transaction buffer control ` " +"with a high water mark of 262144 and a low water mark of 65536. ::" +msgstr "" +":ref:`transaction-buffering-control` を high water マーク 262144 と low " +"water マーク 65536 で有効化します。\\ ::" + +#: ../../reference/api/TSHttpOverridableConfig.en.rst:153 +msgid "See also" +msgstr "参考" + +#: ../../reference/api/TSHttpOverridableConfig.en.rst:154 +msgid ":manpage:`TSAPI(3ts)`" +msgstr ":manpage:`TSAPI(3ts)`" + +#: ../../reference/api/TSHttpOverridableConfig.en.rst:55 +msgid "Configurations" +msgstr "設定" + +#: ../../reference/api/TSHttpOverridableConfig.en.rst:57 +msgid "" +"The following configurations (from ``records.config``) are overridable: ::" +msgstr "次の (``records.config`` の ) 設定は上書き可能です。" diff --git a/doc/locale/ja/LC_MESSAGES/reference/api/TSHttpParserCreate.en.po b/doc/locale/ja/LC_MESSAGES/reference/api/TSHttpParserCreate.en.po new file mode 100644 index 00000000..50ba6621 --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/reference/api/TSHttpParserCreate.en.po @@ -0,0 +1,122 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-11-08 17:57+0900\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../reference/api/TSHttpParserCreate.en.rst:22 +msgid "TSHttpParserCreate" +msgstr "" + +#: ../../reference/api/TSHttpParserCreate.en.rst:25 +msgid "Synopsis" +msgstr "" + +#: ../../reference/api/TSHttpParserCreate.en.rst:27 +msgid "`#include `" +msgstr "" + +#: ../../reference/api/TSHttpParserCreate.en.rst:36 +msgid "Description" +msgstr "" + +#: ../../reference/api/TSHttpParserCreate.en.rst:38 +msgid "" +":func:`TSHttpParserCreate` creates an HTTP parser object. The parser's data " +"structure contains information about the header being parsed. A single HTTP " +"parser can be used multiple times, though not simultaneously. Before being " +"used again, the parser must be cleared by calling :func:`TSHttpParserClear`." +msgstr "" + +#: ../../reference/api/TSHttpParserCreate.en.rst:44 +msgid "" +":func:`TSHttpHdrParseReq` parses an HTTP request header. 
The HTTP header :" +"data:`offset` must already be created, and must reside inside the marshal " +"buffer :data:`bufp`. The :data:`start` argument points to the current " +"position of the string buffer being parsed and the :data:`end` argument " +"points to one byte after the end of the buffer to be parsed. On return, :" +"data:`start` is modified to point past the last character parsed." +msgstr "" + +#: ../../reference/api/TSHttpParserCreate.en.rst:52 +msgid "" +"It is possible to parse an HTTP request header a single byte at a time " +"using repeated calls to :func:`TSHttpHdrParseReq`. As long as an error does " +"not occur, the :func:`TSHttpHdrParseReq` function will consume that single " +"byte and ask for more. :func:`TSHttpHdrParseReq` should be called after :" +"data:`TS_HTTP_READ_REQUEST_HDR_HOOK`." +msgstr "" + +#: ../../reference/api/TSHttpParserCreate.en.rst:58 +msgid "" +":func:`TSHttpHdrParseResp` operates in the same manner as :func:" +"`TSHttpHdrParseReq` except it parses an HTTP response header. It should be " +"called after :data:`TS_HTTP_READ_RESPONSE_HDR_HOOK`." +msgstr "" + +#: ../../reference/api/TSHttpParserCreate.en.rst:62 +msgid "" +":func:`TSHttpParserClear` clears the specified HTTP parser so it may be " +"used again." +msgstr "" + +#: ../../reference/api/TSHttpParserCreate.en.rst:65 +msgid "" +":func:`TSHttpParserDestroy` destroys the TSHttpParser object pointed to by :" +"data:`parser`. The :data:`parser` pointer must not be NULL." +msgstr "" + +#: ../../reference/api/TSHttpParserCreate.en.rst:69 +msgid "Return values" +msgstr "" + +#: ../../reference/api/TSHttpParserCreate.en.rst:71 +msgid "" +":func:`TSHttpHdrParseReq` and :func:`TSHttpHdrParseResp` both return a :" +"type:`TSParseResult` value. 
:data:`TS_PARSE_ERROR` is returned on error, :" +"data:`TS_PARSE_CONT` is returned if parsing of the header stopped because " +"the end of the buffer was reached, and :data:`TS_PARSE_DONE` or :data:" +"`TS_PARSE_OK` when a \\\\r\\\\n\\\\r\\\\n pattern is encountered, " +"indicating the end of the header." +msgstr "" + +#: ../../reference/api/TSHttpParserCreate.en.rst:79 +msgid "Bugs" +msgstr "" + +#: ../../reference/api/TSHttpParserCreate.en.rst:81 +msgid "" +"The distinction between the :data:`TS_PARSE_DONE` and :data:`TS_PARSE_OK` " +"results is not well-defined. Plugins should expect both status codes and " +"treat them equivalently." +msgstr "" + +#: ../../reference/api/TSHttpParserCreate.en.rst:86 +msgid "See also" +msgstr "" + +#: ../../reference/api/TSHttpParserCreate.en.rst:87 +msgid ":manpage:`TSAPI(3ts)`" +msgstr "" diff --git a/doc/locale/ja/LC_MESSAGES/reference/api/TSHttpTxnMilestoneGet.en.po b/doc/locale/ja/LC_MESSAGES/reference/api/TSHttpTxnMilestoneGet.en.po new file mode 100644 index 00000000..e54d374d --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/reference/api/TSHttpTxnMilestoneGet.en.po @@ -0,0 +1,259 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-11-08 17:57+0900\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../reference/api/TSHttpTxnMilestoneGet.en.rst:22 +msgid "TSHttpTxnMilestoneGet" +msgstr "" + +#: ../../reference/api/TSHttpTxnMilestoneGet.en.rst:25 +msgid "Synopsis" +msgstr "" + +#: ../../reference/api/TSHttpTxnMilestoneGet.en.rst:27 +msgid "`#include `" +msgstr "" + +#: ../../reference/api/TSHttpTxnMilestoneGet.en.rst:32 +msgid "Description" +msgstr "" + +#: ../../reference/api/TSHttpTxnMilestoneGet.en.rst:34 +msgid "" +":func:`TSHttpTxnMilestoneGet` will fetch a specific :arg:`milestone` timer " +"value for the transaction :arg:`txnp`. These timers are calculated during " +"the lifetime of a transaction and are measured in nanoseconds from the " +"beginning of the transaction. :arg:`time` is used a pointer to storage to " +"update if the call is successful." +msgstr "" + +#: ../../reference/api/TSHttpTxnMilestoneGet.en.rst:41 +msgid "Value" +msgstr "" + +#: ../../reference/api/TSHttpTxnMilestoneGet.en.rst:41 +msgid "Milestone" +msgstr "" + +#: ../../reference/api/TSHttpTxnMilestoneGet.en.rst:43 +msgid ":const:`TS_MILESTONE_UA_BEGIN`" +msgstr "" + +#: ../../reference/api/TSHttpTxnMilestoneGet.en.rst:43 +msgid "The client connection is accepted." +msgstr "" + +#: ../../reference/api/TSHttpTxnMilestoneGet.en.rst:44 +msgid ":const:`TS_MILESTONE_UA_READ_HEADER_DONE`" +msgstr "" + +#: ../../reference/api/TSHttpTxnMilestoneGet.en.rst:44 +msgid "The request header from the client has been read and parsed." 
+msgstr "" + +#: ../../reference/api/TSHttpTxnMilestoneGet.en.rst:45 +msgid ":const:`TS_MILESTONE_UA_BEGIN_WRITE`" +msgstr "" + +#: ../../reference/api/TSHttpTxnMilestoneGet.en.rst:45 +msgid "The response header write to the client starts." +msgstr "" + +#: ../../reference/api/TSHttpTxnMilestoneGet.en.rst:46 +msgid ":const:`TS_MILESTONE_UA_CLOSE`" +msgstr "" + +#: ../../reference/api/TSHttpTxnMilestoneGet.en.rst:46 +msgid "Last I/O activity on the client socket, or connection abort." +msgstr "" + +#: ../../reference/api/TSHttpTxnMilestoneGet.en.rst:47 +msgid ":const:`TS_MILESTONE_SERVER_FIRST_CONNECT`" +msgstr "" + +#: ../../reference/api/TSHttpTxnMilestoneGet.en.rst:47 +msgid "" +"First time origin server connect attempted or shared shared session " +"attached." +msgstr "" + +#: ../../reference/api/TSHttpTxnMilestoneGet.en.rst:48 +msgid ":const:`TS_MILESTONE_SERVER_CONNECT`" +msgstr "" + +#: ../../reference/api/TSHttpTxnMilestoneGet.en.rst:48 +msgid "" +"Most recent time origin server connect attempted or shared session attached." +msgstr "" + +#: ../../reference/api/TSHttpTxnMilestoneGet.en.rst:49 +msgid ":const:`TS_MILESTONE_SERVER_CONNECT_END`" +msgstr "" + +#: ../../reference/api/TSHttpTxnMilestoneGet.en.rst:49 +msgid "More recent time a connection attempt was resolved." +msgstr "" + +#: ../../reference/api/TSHttpTxnMilestoneGet.en.rst:50 +msgid ":const:`TS_MILESTONE_SERVER_BEGIN_WRITE`" +msgstr "" + +#: ../../reference/api/TSHttpTxnMilestoneGet.en.rst:50 +msgid "First byte is written to the origin server connection." +msgstr "" + +#: ../../reference/api/TSHttpTxnMilestoneGet.en.rst:51 +msgid ":const:`TS_MILESTONE_SERVER_FIRST_READ`" +msgstr "" + +#: ../../reference/api/TSHttpTxnMilestoneGet.en.rst:51 +msgid "First byte is read from connection to origin server." 
+msgstr "" + +#: ../../reference/api/TSHttpTxnMilestoneGet.en.rst:52 +msgid ":const:`TS_MILESTONE_SERVER_READ_HEADER_DONE`" +msgstr "" + +#: ../../reference/api/TSHttpTxnMilestoneGet.en.rst:52 +msgid "Origin server response has been read and parsed." +msgstr "" + +#: ../../reference/api/TSHttpTxnMilestoneGet.en.rst:53 +msgid ":const:`TS_MILESTONE_SERVER_CLOSE`" +msgstr "" + +#: ../../reference/api/TSHttpTxnMilestoneGet.en.rst:53 +msgid "Last I/O activity on origin server connection." +msgstr "" + +#: ../../reference/api/TSHttpTxnMilestoneGet.en.rst:54 +msgid ":const:`TS_MILESTONE_CACHE_OPEN_READ_BEGIN`" +msgstr "" + +#: ../../reference/api/TSHttpTxnMilestoneGet.en.rst:54 +msgid "Initiate read of the cache." +msgstr "" + +#: ../../reference/api/TSHttpTxnMilestoneGet.en.rst:55 +msgid ":const:`TS_MILESTONE_CACHE_OPEN_READ_END`" +msgstr "" + +#: ../../reference/api/TSHttpTxnMilestoneGet.en.rst:55 +msgid "Initial cache read has resolved." +msgstr "" + +#: ../../reference/api/TSHttpTxnMilestoneGet.en.rst:56 +msgid ":const:`TS_MILESTONE_CACHE_OPEN_WRITE_BEGIN`" +msgstr "" + +#: ../../reference/api/TSHttpTxnMilestoneGet.en.rst:56 +msgid "Start open for cache write." +msgstr "" + +#: ../../reference/api/TSHttpTxnMilestoneGet.en.rst:57 +msgid ":const:`TS_MILESTONE_CACHE_OPEN_WRITE_END`" +msgstr "" + +#: ../../reference/api/TSHttpTxnMilestoneGet.en.rst:57 +msgid "Cache has been opened for write." +msgstr "" + +#: ../../reference/api/TSHttpTxnMilestoneGet.en.rst:58 +msgid ":const:`TS_MILESTONE_DNS_LOOKUP_BEGIN`" +msgstr "" + +#: ../../reference/api/TSHttpTxnMilestoneGet.en.rst:58 +msgid "Initiate host resolution in HostDB" +msgstr "" + +#: ../../reference/api/TSHttpTxnMilestoneGet.en.rst:59 +msgid ":const:`TS_MILESTONE_DNS_LOOKUP_END`" +msgstr "" + +#: ../../reference/api/TSHttpTxnMilestoneGet.en.rst:59 +msgid "Host resolution resolves." 
+msgstr "" + +#: ../../reference/api/TSHttpTxnMilestoneGet.en.rst:60 +msgid ":const:`TS_MILESTONE_SM_START`" +msgstr "" + +#: ../../reference/api/TSHttpTxnMilestoneGet.en.rst:60 +msgid "Transaction state machine is initialized." +msgstr "" + +#: ../../reference/api/TSHttpTxnMilestoneGet.en.rst:61 +msgid ":const:`TS_MILESTONE_SM_FINISH`" +msgstr "" + +#: ../../reference/api/TSHttpTxnMilestoneGet.en.rst:61 +msgid "Transaction has finished, state machine final logging has started." +msgstr "" + +#: ../../reference/api/TSHttpTxnMilestoneGet.en.rst:64 +msgid "" +"The server connect times predate the transmission of the ``SYN`` packet. " +"That is, before a connection to the origin server is completed." +msgstr "" + +#: ../../reference/api/TSHttpTxnMilestoneGet.en.rst:67 +msgid "" +"A connection attempt is *resolved* when no more connection related activity " +"remains to be done, and the connection is either established or has failed." +msgstr "" + +#: ../../reference/api/TSHttpTxnMilestoneGet.en.rst:70 +msgid "" +":const:`TS_MILESTONE_UA_CLOSE` and :const:`TS_MILESTONE_SERVER_CLOSE` are " +"updated continuously during the life of the transaction, every time there " +"is I/O activity. The updating stops when the corresponding connection is " +"closed, leaving the last I/O time as the final value." +msgstr "" + +#: ../../reference/api/TSHttpTxnMilestoneGet.en.rst:74 +msgid "" +"The cache ``OPEN`` milestones time only the initial setup, the \"open\", " +"not the full read or write." +msgstr "" + +#: ../../reference/api/TSHttpTxnMilestoneGet.en.rst:77 +msgid "Return values" +msgstr "" + +#: ../../reference/api/TSHttpTxnMilestoneGet.en.rst:79 +msgid "" +":const:`TS_SUCCESS` if successful and :arg:`time` was updated, otherwise :" +"const:`TS_ERROR`." 
+msgstr "" + +#: ../../reference/api/TSHttpTxnMilestoneGet.en.rst:82 +msgid "See also" +msgstr "" + +#: ../../reference/api/TSHttpTxnMilestoneGet.en.rst:83 +msgid ":manpage:`TSAPI(3ts)`" +msgstr "" diff --git a/doc/locale/ja/LC_MESSAGES/reference/api/TSIOBufferCreate.en.po b/doc/locale/ja/LC_MESSAGES/reference/api/TSIOBufferCreate.en.po new file mode 100644 index 00000000..40448301 --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/reference/api/TSIOBufferCreate.en.po @@ -0,0 +1,123 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-11-08 17:57+0900\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../reference/api/TSIOBufferCreate.en.rst:22 +msgid "TSIOBufferCreate" +msgstr "" + +#: ../../reference/api/TSIOBufferCreate.en.rst:25 +msgid "Synopsis" +msgstr "" + +#: ../../reference/api/TSIOBufferCreate.en.rst:26 +msgid "`#include `" +msgstr "" + +#: ../../reference/api/TSIOBufferCreate.en.rst:37 +msgid "Description" +msgstr "" + +#: ../../reference/api/TSIOBufferCreate.en.rst:39 +msgid "" +"The :type:`TSIOBuffer` data structure is the building block of the TSVConn " +"abstraction. An IO buffer is composed of a list of buffer blocks which are " +"reference counted so that they can reside in multiple buffers at the same " +"time. This makes it extremely efficient to copy data from one IO buffer to " +"another using :func:`TSIOBufferCopy` since Traffic Server only needs to " +"copy pointers and adjust reference counts appropriately and not actually " +"copy any data; however applications should still strive to ensure data " +"blocks are a reasonable size." +msgstr "" + +#: ../../reference/api/TSIOBufferCreate.en.rst:48 +msgid "" +"The IO buffer abstraction provides for a single writer and multiple " +"readers. In order for the readers to have no knowledge of each other, they " +"manipulate IO buffers through the :type:`TSIOBufferReader` data structure. " +"Since only a single writer is allowed, there is no corresponding :type:" +"`TSIOBufferWriter` data structure. The writer simply modifies the IO buffer " +"directly." +msgstr "" + +#: ../../reference/api/TSIOBufferCreate.en.rst:55 +msgid ":func:`TSIOBufferCreate` creates an empty :type:`TSIOBuffer`." 
+msgstr "" + +#: ../../reference/api/TSIOBufferCreate.en.rst:57 +msgid "" +":func:`TSIOBufferSizedCreate` creates an empty :type:`TSIOBuffer` with an " +"initial capacity of index bytes." +msgstr "" + +#: ../../reference/api/TSIOBufferCreate.en.rst:60 +msgid "" +":func:`TSIOBufferDestroy` destroys the IO buffer bufp. Since multiple IO " +"buffers can share data, this does not necessarily free all of the data " +"associated with the IO buffer but simply decrements the appropriate " +"reference counts." +msgstr "" + +#: ../../reference/api/TSIOBufferCreate.en.rst:64 +msgid "" +":func:`TSIOBufferWrite` appends length bytes from the buffer buf to the IO " +"buffer bufp and returns the number of bytes successfully written into the " +"IO buffer." +msgstr "" + +#: ../../reference/api/TSIOBufferCreate.en.rst:68 +msgid "" +":func:`TSIOBufferProduce` makes nbytes of data available for reading in the " +"IO buffer bufp. A common pattern for writing to an IO buffer is to copy " +"data into a buffer block and then call INKIOBufferProduce to make the new " +"data visible to any readers." +msgstr "" + +#: ../../reference/api/TSIOBufferCreate.en.rst:73 +msgid "" +"The watermark of an TSIOBuffer is the minimum number of bytes of data that " +"have to be in the buffer before calling back any continuation that has " +"initiated a read operation on this buffer. As a writer feeds data into the " +"TSIOBuffer, no readers are called back until the amount of data reaches the " +"watermark. Setting a watermark can improve performance because it avoids " +"frequent callbacks to read small amounts of data. :func:" +"`TSIOBufferWaterMarkGet` gets the current watermark for the IO buffer bufp." +msgstr "" + +#: ../../reference/api/TSIOBufferCreate.en.rst:82 +msgid "" +":func:`TSIOBufferWaterMarkSet` gets the current watermark for the IO buffer " +"bufp to water_mark bytes." 
+msgstr "" + +#: ../../reference/api/TSIOBufferCreate.en.rst:86 +msgid "See also" +msgstr "" + +#: ../../reference/api/TSIOBufferCreate.en.rst:88 +msgid ":manpage:`TSAPI(3ts)`, :manpage:`TSIOBufferReaderAlloc(3ts)`" +msgstr "" diff --git a/doc/locale/ja/LC_MESSAGES/reference/api/TSInstallDirGet.en.po b/doc/locale/ja/LC_MESSAGES/reference/api/TSInstallDirGet.en.po new file mode 100644 index 00000000..dcb00537 --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/reference/api/TSInstallDirGet.en.po @@ -0,0 +1,84 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-11-08 17:57+0900\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../reference/api/TSInstallDirGet.en.rst:22 +msgid "TSInstallDirGet" +msgstr "TSInstallDirGet" + +#: ../../reference/api/TSInstallDirGet.en.rst:25 +msgid "Synopsis" +msgstr "概要" + +#: ../../reference/api/TSInstallDirGet.en.rst:27 +msgid "`#include `" +msgstr "`#include `" + +#: ../../reference/api/TSInstallDirGet.en.rst:34 +msgid "Description" +msgstr "解説" + +#: ../../reference/api/TSInstallDirGet.en.rst:36 +msgid "" +":func:`TSInstallDirGet` returns the path to the root of the Traffic Server " +"installation. :func:`TSConfigDirGet` and :func:`TSPluginDirGet` return the " +"complete, absolute path to the configuration directory and the plugin " +"installation directory respectively." +msgstr "" +":func:`TSInstallDirGet` は インストール済みの Traffic Server のルートディレ" +"クトリのパスを返します。:func:`TSConfigDirGet` と :func:`TSPluginDirGet` は" +"それぞれ設定ディレクトリとインストール済みのプラグインディレクトリへの完全な" +"絶対パスを返します。" + +#: ../../reference/api/TSInstallDirGet.en.rst:42 +msgid "Return values" +msgstr "戻り値" + +#: ../../reference/api/TSInstallDirGet.en.rst:44 +msgid "" +"These functions all return a NUL-terminated string that must not be " +"modified or freed." 
+msgstr "" +"これらの関数はすべて変更や解放をしてはならない NULL 終端文字列を返します。" + +#: ../../reference/api/TSInstallDirGet.en.rst:47 +msgid "Examples" +msgstr "例" + +#: ../../reference/api/TSInstallDirGet.en.rst:49 +msgid "" +"To load a file that is located in the Traffic Server configuration " +"directory::" +msgstr "Traffic Server 設定ディレクトリにあるファイルを読み込むためには::" + +#: ../../reference/api/TSInstallDirGet.en.rst:58 +msgid "See also" +msgstr "参考" + +#: ../../reference/api/TSInstallDirGet.en.rst:59 +msgid ":manpage:`TSAPI(3ts)`" +msgstr ":manpage:`TSAPI(3ts)`" diff --git a/doc/locale/ja/LC_MESSAGES/reference/api/TSLifecycleHookAdd.en.po b/doc/locale/ja/LC_MESSAGES/reference/api/TSLifecycleHookAdd.en.po new file mode 100644 index 00000000..3a094c09 --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/reference/api/TSLifecycleHookAdd.en.po @@ -0,0 +1,192 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-11-08 17:57+0900\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../reference/api/TSLifecycleHookAdd.en.rst:24 +msgid "TSLifecycleHookAdd" +msgstr "TSLifecycleHookAdd" + +#: ../../reference/api/TSLifecycleHookAdd.en.rst:27 +msgid "Synopsis" +msgstr "概要" + +#: ../../reference/api/TSLifecycleHookAdd.en.rst:28 +msgid "`#include `" +msgstr "`#include `" + +#: ../../reference/api/TSLifecycleHookAdd.en.rst:33 +msgid "Description" +msgstr "解説" + +#: ../../reference/api/TSLifecycleHookAdd.en.rst:35 +msgid "" +":func:`TSLifecycleHookAdd` adds :arg:`contp` to the list of lifecycle hooks " +"specified by :arg:`id`. Lifecycle hooks are based on the Traffic Server " +"process, not on any specific transaction or session. These will typically " +"be called only once during the execution of the Traffic Server process and " +"therefore should be added in :func:`TSPluginInit` (which could itself be " +"considered a lifecyle hook). Unlike other hooks, lifecycle hooks may not " +"have a well defined ordering and use of them should not assume that one of " +"the hooks is always called before another unless specifically mentioned." 
+msgstr "" +":func:`TSLifecycleHookAdd` は :arg:`contp` を :arg:`id` で指定されるライフサ" +"イクルフックのリストに追加します。ライフサイクルフックは特定のトランザクショ" +"ンやセッションではなく Traffic Server プロセスのもとに成り立ちます。これらは" +"一般的に Traffic Server プロセスの実行中に一度だけ呼び出されるので、( それ自" +"体もライフサイクルフックとみなされる ) :func:`TSPluginInit` の中で追加される" +"べきです。他のフックと違い、ライフサイクルフックは明確な順序を持たず、使用に" +"あたっては特に言及されていない限りフックのうちの一つが常に他のどれかより前に" +"呼び出されると仮定すべきではありません。" + +#: ../../reference/api/TSLifecycleHookAdd.en.rst:45 +msgid "`TS_LIFECYCLE_PORTS_INITIALIZED_HOOK`" +msgstr "`TS_LIFECYCLE_PORTS_INITIALIZED_HOOK`" + +#: ../../reference/api/TSLifecycleHookAdd.en.rst:40 +msgid "" +"Called after the :ts:cv:`proxy server port ` data structures have been initialized but before connections " +"are accepted on those ports. The sockets corresponding to the ports may or " +"may not be open depending on how the :program:`traffic_server` process was " +"invoked. Other API functions that depend on server ports should be called " +"from this hook and not :func:`TSPluginInit`." +msgstr "" +":ts:cv:`プロキシーサーバーポート ` データ構" +"造が初期化された後、接続がそれらのポート上で accept される前に呼ばれます。" +"ポートに対応するソケットは :program:`traffic_server` プロセスがどのように起" +"動されたかによって open されることもされないこともあります。サーバーポートに" +"依存するその他の API 関数は :func:`TSPluginInit` ではなくこのフックから呼ば" +"れるべきです。" + +#: ../../reference/api/TSLifecycleHookAdd.en.rst:45 +msgid "" +"Invoked with the event `TS_EVENT_LIFECYCLE_PORTS_INITIALIZED` and `NULL` " +"data." +msgstr "" +"`TS_EVENT_LIFECYCLE_PORTS_INITIALIZED` イベントと `NULL` データとともに呼び" +"出されます。" + +#: ../../reference/api/TSLifecycleHookAdd.en.rst:53 +msgid "`TS_LIFECYCLE_PORTS_READY_HOOK`" +msgstr "`TS_LIFECYCLE_PORTS_READY_HOOK`" + +#: ../../reference/api/TSLifecycleHookAdd.en.rst:48 +msgid "" +"Called after enabling connections on the proxy server ports. Because " +"Traffic Server is threaded this may or may not be called before any " +"connections are accepted. 
The hook code may assume that any connection to " +"Traffic Server started after this hook is called will be accepted by " +"Traffic Server, making this a convenient place to signal external processes " +"of that." +msgstr "" +"プロキシーサーバーポート上での接続の有効化の後に呼ばれます。Traffic Server " +"はスレッドで動作するので、接続を受け付ける前に呼ばれることも、そうでないこと" +"もあります。フックコードはこのフックが呼ばれた後に開始された Traffic Server " +"に対する接続が Traffic Server によって accept されると仮定し、そのことを外部" +"プロセスに通知する便利な場所とできます。" + +#: ../../reference/api/TSLifecycleHookAdd.en.rst:53 +msgid "" +"Invoked with the event `TS_EVENT_LIFECYCLE_PORTS_READY` and `NULL` data." +msgstr "" +"`TS_EVENT_LIFECYCLE_PORTS_READY` イベントと `NULL` データとともに呼び出され" +"ます。" + +#: ../../reference/api/TSLifecycleHookAdd.en.rst:58 +msgid "`TS_LIFECYCLE_CACHE_READY_HOOK`" +msgstr "`TS_LIFECYCLE_CACHE_READY_HOOK`" + +#: ../../reference/api/TSLifecycleHookAdd.en.rst:56 +msgid "Called after Traffic Server cache initialization has finished." +msgstr "Traffic Server のキャッシュの初期化が完了した後に呼ばれます。" + +#: ../../reference/api/TSLifecycleHookAdd.en.rst:58 +msgid "" +"Invoked with the event `TS_EVENT_LIFECYCLE_CACHE_READY` and `NULL` data." +msgstr "" +"`TS_EVENT_LIFECYCLE_CACHE_READY` イベントと `NULL` データとともに呼び出され" +"ます。" + +#: ../../reference/api/TSLifecycleHookAdd.en.rst:61 +msgid "Ordering" +msgstr "順番" + +#: ../../reference/api/TSLifecycleHookAdd.en.rst:63 +msgid "" +"`TS_LIFECYCLE_PORTS_INITIALIZED_HOOK` will always be called before " +"`TS_LIFECYCLE_PORTS_READY_HOOK`." +msgstr "" +"`TS_LIFECYCLE_PORTS_INITIALIZED_HOOK` は常に " +"`TS_LIFECYCLE_PORTS_READY_HOOK` の前に呼び出されます。" + +#: ../../reference/api/TSLifecycleHookAdd.en.rst:66 +msgid "Examples" +msgstr "例" + +#: ../../reference/api/TSLifecycleHookAdd.en.rst:68 +msgid "" +"The following example demonstrates how to correctly use :func:" +"`TSNetAcceptNamedProtocol`, which requires the proxy ports to be " +"initialized and therefore does not work if called from :func:`TSPluginInit` " +"directly. 
::" +msgstr "" +"次の例は :func:`TSNetAcceptNamedProtocol` を正しく使用する方法、つまりプロキ" +"シーポートが初期化され :func:`TSPluginInit` から直接呼び出された場合に動作し" +"ないようにする方法を説明しています。" + +#: ../../reference/api/TSLifecycleHookAdd.en.rst:100 +msgid "History" +msgstr "歴史" + +#: ../../reference/api/TSLifecycleHookAdd.en.rst:102 +msgid "" +"Lifecycle hooks were introduced to solve process initialization ordering " +"issues (`TS-1487 `_). " +"Different API calls required different modules of Traffic Server to be " +"initialized for the call to work, but others did not work that late in " +"initialization, which was problematic because all of them could effectively " +"only be called from :func:`TSPluginInit` . The solution was to move :func:" +"`TSPluginInit` as early as possible in the process initialization and " +"provide hooks for API calls that needed to be invoked later which served " +"essentially as additional pluging initialization points." +msgstr "" +"ライフサイクルフックはプロセスの初期化順の課題を解決するために登場しました " +"(`TS-1487 `_) 。異なる API の" +"呼び出しは Traffic Server の異なるモジュールが初期化されることを要求していま" +"したが、初期化の終盤のものは動作せず、事実上 :func:`TSPluginInit` からのみ呼" +"び出すことが可能だったので問題となっていました。その解決方法は :func:" +"`TSPluginInit` をプロセスの初期化の可能な限り早い位置に移し、本来は追加のプ" +"ラグイン初期化ポイントとして提供され後で呼び出される必要のあった API 呼び出" +"しのためにフックを提供することでした。" + +#: ../../reference/api/TSLifecycleHookAdd.en.rst:108 +msgid "See also" +msgstr "参考" + +#: ../../reference/api/TSLifecycleHookAdd.en.rst:109 +msgid ":manpage:`TSAPI(3ts)`, :manpage:`TSContCreate(3ts)`" +msgstr ":manpage:`TSAPI(3ts)` 、\\ :manpage:`TSContCreate(3ts)`" diff --git a/doc/locale/ja/LC_MESSAGES/reference/api/TSMBufferCreate.en.po b/doc/locale/ja/LC_MESSAGES/reference/api/TSMBufferCreate.en.po new file mode 100644 index 00000000..863b9993 --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/reference/api/TSMBufferCreate.en.po @@ -0,0 +1,119 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-11-08 17:57+0900\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../reference/api/TSMBufferCreate.en.rst:21 +msgid "TSMBufferCreate" +msgstr "TSMBufferCreate" + +#: ../../reference/api/TSMBufferCreate.en.rst:24 +msgid "Synopsis" +msgstr "概要" + +#: ../../reference/api/TSMBufferCreate.en.rst:26 +msgid "`#include `" +msgstr "`#include `" + +#: ../../reference/api/TSMBufferCreate.en.rst:33 +msgid "Description" +msgstr "解説" + +#: ../../reference/api/TSMBufferCreate.en.rst:35 +msgid "" +"The marshal buffer or :type:`TSMBuffer` is a heap data structure that " +"stores parsed URLs, MIME headers and HTTP headers. You can allocate new " +"objects out of marshal buffers, and change the values within the marshal " +"buffer. Whenever you manipulate an object, you require the handle to the " +"object (:type:`TSMLoc`) and the marshal buffer containing the object (:type:" +"`TSMBuffer`)." 
+msgstr "" +"マーシャルバッファー すなわち :type:`TSMBuffer` はパースされた URL 、MIME " +"ヘッダー、HTTP ヘッダーを格納するヒープデータ構造です。マーシャルバッファー" +"の外で新しいオブジェクトを割り当て、その値をマーシャルバッファー内で変更する" +"ことができます。オブジェクトを操作するときにはいつでも、オブジェクト (:type:" +"`TSMLoc`) のハンドルとオブジェクトを含んでいるマーシャルバッファー (:type:" +"`TSMBuffer`) が必要になります。" + +#: ../../reference/api/TSMBufferCreate.en.rst:41 +msgid "" +"Any marshal buffer fetched by :func:`TSHttpTxn*Get` will be used by other " +"parts of the system. Be careful not to destroy these shared, transaction " +"marshal buffers." +msgstr "" +":func:`TSHttpTxn*Get` によって取得されたすべてのマーシャルバッファーはシステ" +"ムの他の場所で使用されます。これらの共有されたトランザクションマーシャルバッ" +"ファーを破棄さないよう気をつけてください。" + +#: ../../reference/api/TSMBufferCreate.en.rst:44 +msgid "" +":func:`TSMBufferCreate` creates a new marshal buffer and initializes the " +"reference count. :func:`TSMBufferDestroy` Ignores the reference count and " +"destroys the marshal buffer bufp. The internal data buffer associated with " +"the marshal buffer is also destroyed if the marshal buffer allocated it." +msgstr "" +":func:`TSMBufferCreate` は新しいマーシャルバッファーを作成し参照数を初期化し" +"ます。:func:`TSMBufferDestroy` は参照数を無視してマーシャルバッファー bufp " +"を破棄します。マーシャルバッファーに紐付けられた内部データバッファーがマー" +"シャルバッファーによって割り当てられていた場合はそれも破棄されます。" + +#: ../../reference/api/TSMBufferCreate.en.rst:50 +msgid "" +":func:`TSHandleMLocRelease` Releases the :type:`TSMLoc` mloc created from " +"the :type:`TSMLoc` parent. If a :type:`TSMLoc` is obtained from a " +"transaction, it does not have a parent :type:`TSMLoc`. Use the the " +"constant :data:`TS_NULL_MLOC` as its parent." 
+msgstr "" +":func:`TSHandleMLocRelease` は :type:`TSMLoc` parent で作成された :type:" +"`TSMLoc` mloc を解放します。:type:`TSMLoc` がトランザクションから取得された" +"場合は parent :type:`TSMLoc` を持っていません。定数 :data:`TS_NULL_MLOC` を " +"parent として使用してください。" + +#: ../../reference/api/TSMBufferCreate.en.rst:56 +msgid "Return values" +msgstr "戻り値" + +#: ../../reference/api/TSMBufferCreate.en.rst:58 +msgid "" +":func:`TSMBufferDestroy` and :func:`TSHandleMLocRelease` return :data:" +"`TS_SUCCESS` on success, or :data:`TS_ERROR` on failure. :func:" +"`TSMBufferCreate` returns the new :type:`TSMBuffer`." +msgstr "" +":func:`TSMBufferDestroy` と :func:`TSHandleMLocRelease` は 成功時に :data:" +"`TS_SUCCESS` を返し、失敗時には :data:`TS_ERROR` を返します。:func:" +"`TSMBufferCreate` は新しい :type:`TSMBuffer` を返します。" + +#: ../../reference/api/TSMBufferCreate.en.rst:63 +msgid "Examples" +msgstr "例" + +#: ../../reference/api/TSMBufferCreate.en.rst:93 +msgid "See also" +msgstr "参考" + +#: ../../reference/api/TSMBufferCreate.en.rst:95 +msgid ":manpage:`TSAPI(3ts)`" +msgstr ":manpage:`TSAPI(3ts)`" diff --git a/doc/locale/ja/LC_MESSAGES/reference/api/TSMimeHdrFieldValueStringGet.en.po b/doc/locale/ja/LC_MESSAGES/reference/api/TSMimeHdrFieldValueStringGet.en.po new file mode 100644 index 00000000..6b3fe326 --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/reference/api/TSMimeHdrFieldValueStringGet.en.po @@ -0,0 +1,132 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.2\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2014-01-15 22:31+0900\n" +"PO-Revision-Date: 2014-02-01 15:41+0900\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../reference/api/TSMimeHdrFieldValueStringGet.en.rst:21 +msgid "TSMimeHdrFieldValueStringGet" +msgstr "TSMimeHdrFieldValueStringGet" + +#: ../../reference/api/TSMimeHdrFieldValueStringGet.en.rst:25 +msgid "Synopsis" +msgstr "概要" + +#: ../../reference/api/TSMimeHdrFieldValueStringGet.en.rst:27 +msgid "`#include `" +msgstr "`#include `" + +#: ../../reference/api/TSMimeHdrFieldValueStringGet.en.rst:37 +msgid "Description" +msgstr "解説" + +#: ../../reference/api/TSMimeHdrFieldValueStringGet.en.rst:39 +msgid "" +"MIME headers and fields can be components of request headers, response " +"headers, or standalone headers created within a Traffic Server plugin. The " +"functions here are all used to access header values of specific types, but " +"it is up to the caller to know if a header has appropriate semantics for " +"the API used. For all but :func:`TSMimeHdrFieldValueStringGet`, an " +"appropriate data conversion algorithm is applied to the header field string." 
+msgstr "" +"MIME ヘッダーとフィールドはリクエストヘッダーやレスポンスヘッダー、または " +"Traffic Server プラグイン中で作られたスタンドアローンヘッダーを構成すること" +"ができます。この関数は全て特定の型のヘッダーの値にアクセスするために使用され" +"ますが、ヘッダーが API の使用に適切な意味を持つかどうかは呼び出し元によりま" +"す。 :func:`TSMimeHdrFieldValueStringGet` 以外では適切なデータ変換アルゴリズ" +"ムがヘッダーフィールド文字列に適用されます。" + +#: ../../reference/api/TSMimeHdrFieldValueStringGet.en.rst:47 +msgid "" +"All the APIs take a :type:`TSMBuffer` marshal buffer argument, and a :type:" +"`TSMLoc` argument indicating the location of the HTTP headers. The " +"required :data:`field` argument is the locator of a specific header value, " +"as returned by an accessor function such as :func:`TSMimeHdrFieldFind`." +msgstr "" +"全ての API は :type:`TSMBuffer` マーシャルバッファー引数と HTTP ヘッダーの場" +"所を指し示す :type:`TSMLoc` 引数を受け取ります。必須の :data:`field` 引数" +"は :func:`TSMimeHdrFieldFind` のようなアクセッサ関数から返される特定のヘッ" +"ダーの値を保管するものです。" + +#: ../../reference/api/TSMimeHdrFieldValueStringGet.en.rst:53 +msgid "" +"Within the header field, comma-separated values can be retrieved with an " +"index (:data:`idx`) ranging from 0 to the maximum number of fields for this " +"value; this maximum is retrieved using :func:`TSMimeHdrFieldValuesCount`. " +"An :data:`idx` value of ``-1`` has the semantics of retrieving the entire " +"header value, regardless of how many comma-separated values there are. If a " +"header is not comma-separated, an :data:`idx` of :data:`0` or :data:`-1` " +"are the same, but the latter is preferred." +msgstr "" +"ヘッダーフィールドの中で、コンマで分けられた値は 0 からこの値のフィールドの" +"最大値の間のインデックス ( :data:`idx` ) で取得することができます。この最大" +"値は :func:`TSMimeHdrFieldValuesCount` で取得されます。 ``-1`` の :data:" +"`idx` 値はコンマで分けられた値がいくつあるかを考慮せずに、全てのヘッダーの値" +"を取得する意味を持ちます。ヘッダーがコンマで分けられていない場合、:data:`0` " +"と :data:`-1` の :data:`idx` は同じですが、後者が好ましいです。" + +#: ../../reference/api/TSMimeHdrFieldValueStringGet.en.rst:61 +msgid "" +":func:`TSMimeHdrFieldValueStringGet` returns a pointer to the header value, " +"and populated :data:`value_len_ptr` with the length of the value in bytes. 
" +"The returned header value is not NUL-terminated." +msgstr "" +":func:`TSMimeHdrFieldValueStringGet` はヘッダーの値へのポインターとバイト単" +"位での値の長さで満たされた :data:`value_len_ptr` を返します。返されたヘッ" +"ダーの値は NUL 終端ではありません。" + +#: ../../reference/api/TSMimeHdrFieldValueStringGet.en.rst:66 +msgid "Return values" +msgstr "戻り値" + +#: ../../reference/api/TSMimeHdrFieldValueStringGet.en.rst:68 +msgid "" +"All functions returns the header value with a type matching the respective " +"function name. Using :func:`TSMimeHdrFieldValueDateGet` on a header which " +"does not have date-time semantics always returns :data:`0`." +msgstr "" +"全ての関数はヘッダーの値をそれぞれの関数名にマッチした型で返します。 date-" +"time セマンティクスを持たないヘッダーでの :func:" +"`TSMimeHdrFieldValueDateGet` の使用は常に :data:`0` を返します。" + +#: ../../reference/api/TSMimeHdrFieldValueStringGet.en.rst:73 +msgid "Examples" +msgstr "例" + +#: ../../reference/api/TSMimeHdrFieldValueStringGet.en.rst:75 +msgid "This examples show how to retrieve and copy a specific header." +msgstr "この例はどのように特定のヘッダーを取得しコピーするかを示しています。" + +#: ../../reference/api/TSMimeHdrFieldValueStringGet.en.rst:109 +msgid "See also" +msgstr "参照" + +#: ../../reference/api/TSMimeHdrFieldValueStringGet.en.rst:111 +msgid "" +":manpage:`TSAPI(3ts)`, :manpage:`TSMBufferCreate(3ts)`, :manpage:" +"`TSMimeHdrFieldValuesCount(3ts)`" +msgstr "" +":manpage:`TSAPI(3ts)`, :manpage:`TSMBufferCreate(3ts)`, :manpage:" +"`TSMimeHdrFieldValuesCount(3ts)`" diff --git a/doc/locale/ja/LC_MESSAGES/reference/api/TSPluginInit.en.po b/doc/locale/ja/LC_MESSAGES/reference/api/TSPluginInit.en.po new file mode 100644 index 00000000..a893113c --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/reference/api/TSPluginInit.en.po @@ -0,0 +1,92 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-11-08 17:57+0900\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../reference/api/TSPluginInit.en.rst:21 +msgid "TSPluginInit" +msgstr "TSPluginInit" + +#: ../../reference/api/TSPluginInit.en.rst:24 +msgid "Synopsis" +msgstr "概要" + +#: ../../reference/api/TSPluginInit.en.rst:26 +msgid "`#include `" +msgstr "`#include `" + +#: ../../reference/api/TSPluginInit.en.rst:32 +msgid "Description" +msgstr "解説" + +#: ../../reference/api/TSPluginInit.en.rst:34 +msgid "" +":func:`TSPluginInit` must be defined by all plugins. Traffic Server calls " +"this initialization routine when it loads the plugin and sets argc and argv " +"appropriately based on the values in plugin.config. argc is a count of the " +"number of arguments in the argument vector, argv. The count is at least one " +"because the first argument in the argument vector is the plugins name, " +"which must exist in order for the plugin to be loaded. argv is the vector " +"of arguments. The number of arguments in the vector is argc, and argv[0] " +"always contains the name of the plugin shared library. 
:func:" +"`TSPluginRegister` registers the appropriate SDK version for your plugin. " +"Use this function to make sure that the version of Traffic Server on which " +"your plugin is running supports the plugin." +msgstr "" +":func:`TSPluginInit` はすべてのプラグインで定義されなければなりません。 " +"Traffic Server はプラグインを読み込み argc と argv を plugin.config の値をも" +"とに適切に設定する際にこの初期化ルーチンを呼びます。argc は引数配列 argv に" +"ある引数の数です。引数配列の最初の引数はプラグイン名であり、プラグインが読み" +"込まれるために必ず存在するのでこの数は少なくとも一つになります。argv は引数" +"の配列です。配列内の引数の数は argc となり、argv[0] は常にプラグイン共有ライ" +"ブラリの名前を含んでいます。:func:`TSPluginRegister` はプラグインにとって適" +"切な SDK のバージョンを登録します。プラグインが動作する Traffic Server の" +"バージョンがそのプラグインをサポートすることを確認するためにこの関数を使用し" +"てください。" + +#: ../../reference/api/TSPluginInit.en.rst:48 +msgid "Return values" +msgstr "戻り値" + +#: ../../reference/api/TSPluginInit.en.rst:50 +msgid "" +":func:`TSPluginRegister` returns :const:`TS_ERROR` if the plugin " +"registration failed." +msgstr "" +":func:`TSPluginRegister` はプラグインの登録に失敗すると :const:`TS_ERROR` を" +"返します。" + +#: ../../reference/api/TSPluginInit.en.rst:53 +msgid "Examples" +msgstr "例" + +#: ../../reference/api/TSPluginInit.en.rst:73 +msgid "See also" +msgstr "参考" + +#: ../../reference/api/TSPluginInit.en.rst:75 +msgid ":manpage:`TSAPI(3ts)`, :manpage:`TSInstallDirGet(3ts)`" +msgstr ":manpage:`TSAPI(3ts)`, :manpage:`TSInstallDirGet(3ts)`" diff --git a/doc/locale/ja/LC_MESSAGES/reference/api/TSRemap.en.po b/doc/locale/ja/LC_MESSAGES/reference/api/TSRemap.en.po new file mode 100644 index 00000000..7e385f3c --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/reference/api/TSRemap.en.po @@ -0,0 +1,145 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-11-08 17:57+0900\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../reference/api/TSRemap.en.rst:21 +msgid "TSRemapInit" +msgstr "TSRemapInit" + +#: ../../reference/api/TSRemap.en.rst:24 +msgid "Synopsis" +msgstr "概要" + +#: ../../reference/api/TSRemap.en.rst:26 +msgid "`#include ` `#include `" +msgstr "`#include ` `#include `" + +#: ../../reference/api/TSRemap.en.rst:37 +msgid "Description" +msgstr "解説" + +#: ../../reference/api/TSRemap.en.rst:49 +msgid "" +":func:`TSRemapInit` is a required entry point. This function will be called " +"once when Traffic Server loads the plugin. If the optional :func:" +"`TSRemapDone` entry point is available, Traffic Server will call then when " +"unloading the remap plugin." +msgstr "" +":func:`TSRemapInit` は必須のエントリーポイントです。この関数は Traffic " +"Serverがプラグインをロードする際に一度だけ呼ばれます。省略可能な :func:" +"`TSRemapDone` 関数がある場合、Traffic Server は remap プラグインをアンロード" +"するときにそれを呼び出します。" + +#: ../../reference/api/TSRemap.en.rst:54 +msgid "" +"A remap plugin may be invoked for different remap rules. Traffic Server " +"will call the entry point each time a plugin is specified in a remap rule. " +"When a remap plugin instance is no longer required, Traffic Server will " +"call :func:`TSRemapDeleteInstance`." 
+msgstr "" +"remap プラグインは異なる remap ルールのために呼び出されます。Traffic Server " +"はプラグインが remap ルールに指定されるたびにエントリーポイントを呼び出しま" +"す。remap プラグインインスタンスが必要なくなると、Traffic Server は :func:" +"`TSRemapDeleteInstance` を呼び出します。" + +#: ../../reference/api/TSRemap.en.rst:59 +msgid "" +":func:`TSRemapDoRemap` is called for each HTTP transaction. This is a " +"mandatory entry point. In this function, the remap plugin may examine and " +"modify the HTTP transaction." +msgstr "" +":func:`TSRemapDoRemap` は各 HTTP トランザクションで呼び出されます。これは必" +"須のエントリーポイントです。この関数では remap プラグインが HTTP トランザク" +"ションを検査し変更するかもしれません。" + +#: ../../reference/api/TSRemap.en.rst:64 +msgid "Return values" +msgstr "戻り値" + +#: ../../reference/api/TSRemap.en.rst:66 +msgid "" +":func:`TSRemapInit` and :func:`TSRemapNewInstance` should return :data:" +"`TS_SUCCESS` on success, and :data:`TS_ERROR` otherwise. A return value of :" +"data:`TS_ERROR` is unrecoverable." +msgstr "" +":func:`TSRemapInit` と :func:`TSRemapNewInstance` は成功時に :data:" +"`TS_SUCCESS` を、それ以外の時には :data:`TS_ERROR` を返すべきです。戻り値 :" +"data:`TS_ERROR` は回復不可能です。" + +#: ../../reference/api/TSRemap.en.rst:70 +msgid "" +":func:`TSRemapDoRemap` returns a status code that indicates whether the " +"HTTP transaction has been modified and whether Traffic Server should " +"continue to evaluate the chain of remap plugins. If the transaction was " +"modified, the plugin should return :data:`TSREMAP_DID_REMAP` or :data:" +"`TSREMAP_DID_REMAP_STOP`; otherwise it should return :data:" +"`TSREMAP_NO_REMAP` or :data:`TSREMAP_NO_REMAP_STOP`. If Traffic Server " +"should not send the transaction to subsequent plugins in the remap chain, " +"return :data:`TSREMAP_NO_REMAP_STOP` or :data:`TSREMAP_DID_REMAP_STOP`. " +"Returning :data:`TSREMAP_ERROR` causes Traffic Server to stop evaluating " +"the remap chain and respond with an error." 
+msgstr "" +":func:`TSRemapDoRemap` は HTTP トランザクションが変更されたかどうか、そして " +"Traffic Server が remap プラグインの評価を続けるべきかどうかを示すステータス" +"コードを返します。トランザクションが変更されていた場合、プラグインは:data:" +"`TSREMAP_DID_REMAP` または :data:`TSREMAP_DID_REMAP_STOP` を返すべきです。そ" +"れ以外の場合は :data:`TSREMAP_NO_REMAP` または :data:" +"`TSREMAP_NO_REMAP_STOP` を返すべきです。Traffic Server がトランザクションを " +"remap チェーンの続きのプラグインに送信すべきではない場合は、:data:" +"`TSREMAP_NO_REMAP_STOP` または :data:`TSREMAP_DID_REMAP_STOP` を返します。:" +"data:`TSREMAP_ERROR` を返すと Traffic Server に remap チェーンの評価を止めさ" +"せ、エラーを返させます。" + +#: ../../reference/api/TSRemap.en.rst:83 +msgid "See also" +msgstr "参考" + +#: ../../reference/api/TSRemap.en.rst:85 +msgid ":manpage:`TSAPI(3ts)`" +msgstr ":manpage:`TSAPI(3ts)`" + +#: ../../reference/api/TSRemap.en.rst:39 +msgid "" +"The Traffic Server remap interface provides a simplified mechanism for " +"plugins to manipulate HTTP transactions. A remap plugin is not global; it " +"is configured on a per-remap rule basis, which enables you to customize how " +"URLs are redirected based on individual rules in the remap.config file. " +"Writing a remap plugin consists of implementing one or more of the remap " +"entry points and configuring the remap.config configuration file to route " +"the transaction through your plugin. Multiple remap plugins can be " +"specified for a single remap rule, resulting in a remap plugin chain where " +"each plugin is given an opportunity to examine the HTTP transaction." 
+msgstr "" +"Traffic Server の remap インターフェースはプラグインが HTTP トランザク" +"ションを操作するための簡素な仕組みを提供します。remap プラグインは大域的" +"ではありません。それは remap ルール毎に設定され、remap.config ファイル内" +"の個々のルールに基づいて URL がどのようにリダイレクトされるかをカスタマイ" +"ズできるようにしています。一つ以上の remap エントリーポイントを満たすこと" +"と、トランザクションがプラグインを通るように remap.config を設定すること" +"から remap プラグインを書くことは成り立っています。複数の remap プラグイ" +"ンが一つの remap ルールに指定されることもあり、HTTP トランザクションを検" +"査する機会が各プラグインに与えられる remap プラグインチェーンとなります。" diff --git a/doc/locale/ja/LC_MESSAGES/reference/api/TSTrafficServerVersionGet.en.po b/doc/locale/ja/LC_MESSAGES/reference/api/TSTrafficServerVersionGet.en.po new file mode 100644 index 00000000..2076acc1 --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/reference/api/TSTrafficServerVersionGet.en.po @@ -0,0 +1,68 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-11-08 17:57+0900\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../reference/api/TSTrafficServerVersionGet.en.rst:21 +msgid "TSTrafficServerVersionGet" +msgstr "TSTrafficServerVersionGet" + +#: ../../reference/api/TSTrafficServerVersionGet.en.rst:24 +msgid "Synopsis" +msgstr "概要" + +#: ../../reference/api/TSTrafficServerVersionGet.en.rst:26 +msgid "`#include `" +msgstr "`#include `" + +#: ../../reference/api/TSTrafficServerVersionGet.en.rst:34 +msgid "Description" +msgstr "解説" + +#: ../../reference/api/TSTrafficServerVersionGet.en.rst:36 +msgid "" +":func:`TSTrafficServerVersionGet` returns a pointer to a string of " +"characters that indicates the Traffic Server release version. This string " +"must not be modified." +msgstr "" +":func:`TSTrafficServerVersionGet` は Traffic Server のリリースバージョンを示" +"す文字列へのポインターを返します。この文字列は変更してはいけません。" + +#: ../../reference/api/TSTrafficServerVersionGet.en.rst:40 +msgid "The other APIs return an integer version number." +msgstr "その他の API は整数のバージョン番号を返します。" + +#: ../../reference/api/TSTrafficServerVersionGet.en.rst:43 +msgid "Example" +msgstr "例" + +#: ../../reference/api/TSTrafficServerVersionGet.en.rst:96 +msgid "See also" +msgstr "参考" + +#: ../../reference/api/TSTrafficServerVersionGet.en.rst:98 +msgid ":manpage:`TSAPI(3ts)`" +msgstr ":manpage:`TSAPI(3ts)`" diff --git a/doc/locale/ja/LC_MESSAGES/reference/api/TSTypes.en.po b/doc/locale/ja/LC_MESSAGES/reference/api/TSTypes.en.po new file mode 100644 index 00000000..29043963 --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/reference/api/TSTypes.en.po @@ -0,0 +1,121 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-11-08 17:57+0900\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../reference/api/TSTypes.en.rst:21 +msgid "TSAPI Types" +msgstr "TSAPI 型" + +#: ../../reference/api/TSTypes.en.rst:24 +msgid "Synopsis" +msgstr "概要" + +#: ../../reference/api/TSTypes.en.rst:25 +msgid "`#include `" +msgstr "`#include `" + +#: ../../reference/api/TSTypes.en.rst:26 +msgid "`#include `" +msgstr "`#include `" + +#: ../../reference/api/TSTypes.en.rst:29 +msgid "Description" +msgstr "解説" + +#: ../../reference/api/TSTypes.en.rst:31 +msgid "" +"The Apache Traffic Server API provides large number of types. Many of them " +"are specific to a particular API function or function group, but others are " +"used more widely. Those are described on this page." +msgstr "" +"Apache Traffic Server API は多数の型を提供します。それらの多くは特定の API " +"関数や関数グループに固有のものですが、それ以外はより広範囲で使用されます。そ" +"れらはこのページで解説されます。" + +#: ../../reference/api/TSTypes.en.rst:36 +msgid "An opaque type that represents a Traffic Server :term:`continuation`." 
+msgstr "Traffic Server の\\ :term:`継続`\\ を表現する不明瞭な型。" + +#: ../../reference/api/TSTypes.en.rst:40 +msgid "" +"An enumeration that identifies a specific type of hook for HTTP " +"transactions." +msgstr "HTTP トランザクション用の特定のタイプのフックを特定する列挙型。" + +#: ../../reference/api/TSTypes.en.rst:44 +msgid "An opaque type that represents a Traffic Server :term:`session`." +msgstr "Traffic Server :term:`session` を表現する不明瞭な型。" + +#: ../../reference/api/TSTypes.en.rst:48 +msgid "\"High Resolution Time\"" +msgstr "\"高精度の時間\"" + +#: ../../reference/api/TSTypes.en.rst:50 +msgid "A 64 bit time value, measured in nanoseconds." +msgstr "ナノ秒で計られた 64 ビットの時間値。" + +#: ../../reference/api/TSTypes.en.rst:54 +msgid "" +"An opaque type that represents a Traffic Server HTTP :term:`transaction`." +msgstr "Traffic Server HTTP :term:`transaction` を表現する不明瞭な型。" + +#: ../../reference/api/TSTypes.en.rst:58 +msgid "" +"An enumeration that identifies a :ref:`life cycle hook `." +msgstr ":ref:`life cycle hook ` を識別する列挙型。" + +#: ../../reference/api/TSTypes.en.rst:62 +msgid "" +"An indicator of the results of an API call. A value of :const:`TS_SUCCESS` " +"means the call was successful. Any other value indicates a failure and is " +"specific to the API call." +msgstr "" +"API 呼び出しの結果。:const:`TS_SUCCESS` は呼び出しの成功を意味します。その他" +"の値は失敗を意味し、API 呼び出しに固有のものです。" + +#: ../../reference/api/TSTypes.en.rst:67 +msgid "" +"An enumeration that specifies the type of a value in an internal data " +"structure that is accessible via the API." +msgstr "API からアクセス可能な内部データ構造の値の型を表現する列挙型。" + +#: ../../reference/api/TSTypes.en.rst:71 +msgid "" +"The type used internally for an integer. This corresponds to the value :" +"const:`TS_RECORDDATATYPE_INT` for :type:`TSRecordDataType`." +msgstr "" +"内部で整数として使われる型。これは :type:`TSRecordDataType` の :const:" +"`TS_RECORDDATATYPE_INT` に相当します。" + +#: ../../reference/api/TSTypes.en.rst:76 +msgid "" +"The type used internally for a floating point value. 
This corresponds to " +"the value :const:`TS_RECORDDATATYPE_FLOAT` for :type:`TSRecordDataType`." +msgstr "" +"内部で浮動小数点の値として使われる型。これは :type:`TSRecordDataType` の :" +"const:`TS_RECORDDATATYPE_FLOAT` に相当します。" diff --git a/doc/locale/ja/LC_MESSAGES/reference/api/TSUrlCreate.en.po b/doc/locale/ja/LC_MESSAGES/reference/api/TSUrlCreate.en.po new file mode 100644 index 00000000..c3545528 --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/reference/api/TSUrlCreate.en.po @@ -0,0 +1,113 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-11-08 17:57+0900\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../reference/api/TSUrlCreate.en.rst:22 +msgid "TSUrlCreate" +msgstr "" + +#: ../../reference/api/TSUrlCreate.en.rst:25 +msgid "Synopsis" +msgstr "" + +#: ../../reference/api/TSUrlCreate.en.rst:27 +msgid "`#include `" +msgstr "" + +#: ../../reference/api/TSUrlCreate.en.rst:35 +msgid "Description" +msgstr "" + +#: ../../reference/api/TSUrlCreate.en.rst:78 +msgid "See also" +msgstr "" + +#: ../../reference/api/TSUrlCreate.en.rst:37 +msgid "" +"The URL data structure is a parsed version of a standard internet URL. The " +"Traffic Server URL API provides access to URL data stored in marshal " +"buffers. The URL functions can create, copy, retrieve or delete entire " +"URLs, and retrieve or modify parts of URLs, such as their host, port or " +"scheme information." +msgstr "" + +#: ../../reference/api/TSUrlCreate.en.rst:43 +msgid "" +":func:`TSUrlCreate` creates a new URL within the marshal buffer :data:" +"`bufp`. Release the resulting handle with a call to :func:" +"`TSHandleMLocRelease`." +msgstr "" + +#: ../../reference/api/TSUrlCreate.en.rst:47 +msgid "" +":func:`TSUrlClone` copies the contents of the URL at location :data:" +"`src_url` within the marshal buffer :data:`src_bufp` to a location within " +"the marshal buffer :data:`dest_bufp`. Release the returned handle with a " +"call to :func:`TSHandleMLocRelease`." +msgstr "" + +#: ../../reference/api/TSUrlCreate.en.rst:52 +msgid "" +":func:`TSUrlCopy` copies the contents of the URL at location :data:" +"`src_url` within the marshal buffer :data:`src_bufp` to the location :data:" +"`dest_url` within the marshal buffer dest_bufp. 
:func:`TSUrlCopy` works " +"correctly even if :data:`src_bufp` and :data:`dest_bufp` point to different " +"marshal buffers. It is important for the destination URL (its marshal " +"buffer and :type:`TSMLoc`) to have been created before copying into it." +msgstr "" + +#: ../../reference/api/TSUrlCreate.en.rst:59 +msgid "" +":func:`TSUrlParse` parses a URL. The :data:`start` pointer is both an input " +"and an output parameter and marks the start of the URL to be parsed. After " +"a successful parse, the :data:`start` pointer equals the :data:`end` " +"pointer. The :data:`end` pointer must be one byte after the last character " +"you want to parse. The URL parsing routine assumes that everything between :" +"data:`start` and :data:`end` is part of the URL. It is up to higher level " +"parsing routines, such as :func:`TSHttpHdrParseReq`, to determine the " +"actual end of the URL." +msgstr "" + +#: ../../reference/api/TSUrlCreate.en.rst:69 +msgid "Return values" +msgstr "" + +#: ../../reference/api/TSUrlCreate.en.rst:71 +msgid "" +"The :func:`TSUrlParse` function returns a :type:`TSParseResult`, where :" +"data:`TS_PARSE_ERROR` indicates an error. Success is indicated by one of :" +"data:`TS_PARSE_DONE`, :data:`TS_PARSE_OK` or :data:`TS_PARSE_CONT`. The " +"other APIs all return a :type:`TSReturnCode`, indicating success (:data:" +"`TS_SUCCESS`) or failure (:data:`TS_ERROR`) of the operation." 
+msgstr "" + +#: ../../reference/api/TSUrlCreate.en.rst:80 +msgid "" +":manpage:`TSAPI(3ts)`, :manpage:`TSMBufferCreate(3ts)`, :manpage:" +"`TSUrlHostGet(3ts)`, :manpage:`TSUrlHostSet(3ts)`, :manpage:" +"`TSUrlStringGet(3ts)`, :manpage:`TSUrlPercentEncode(3ts)`" +msgstr "" diff --git a/doc/locale/ja/LC_MESSAGES/reference/api/TSUrlHostGet.en.po b/doc/locale/ja/LC_MESSAGES/reference/api/TSUrlHostGet.en.po new file mode 100644 index 00000000..d9a26370 --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/reference/api/TSUrlHostGet.en.po @@ -0,0 +1,100 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.2\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2014-01-15 22:31+0900\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../reference/api/TSUrlHostGet.en.rst:22 +msgid "TSUrlHostGet" +msgstr "" + +#: ../../reference/api/TSUrlHostGet.en.rst:25 +msgid "Synopsis" +msgstr "" + +#: ../../reference/api/TSUrlHostGet.en.rst:27 +msgid "`#include `" +msgstr "" + +#: ../../reference/api/TSUrlHostGet.en.rst:40 +msgid "Description" +msgstr "" + +#: ../../reference/api/TSUrlHostGet.en.rst:42 +msgid "" +"The URL data structure is a parsed version of a standard internet URL. The " +"Traffic Server URL API provides access to URL data stored in marshal " +"buffers. The URL functions can create, copy, retrieve or delete entire " +"URLs, and retrieve or modify parts of URLs, such as their host, port or " +"scheme information." +msgstr "" + +#: ../../reference/api/TSUrlHostGet.en.rst:48 +msgid "" +":func:`TSUrlSchemeGet`, :func:`TSUrlUserGet`, :func:`TSUrlPasswordGet`, :" +"func:`TSUrlHostGet`, :func:`TSUrlHttpParamsGet`, :func:`TSUrlHttpQueryGet` " +"and :func:`TSUrlHttpFragmentGet` each retrieve an internal pointer to the " +"specified portion of the URL from the marshall buffer :data:`bufp`. The " +"length of the returned string is placed in :data:`length` and a pointer to " +"the URL portion is returned." +msgstr "" + +#: ../../reference/api/TSUrlHostGet.en.rst:55 +msgid "" +":func:`TSUrlPortGet` retrieves the port number portion of the URL located " +"at :data:`offset` within the marshal buffer :data:`bufp`. If there is no " +"explicit port number in the URL, a canonicalized valued is returned based " +"on the URL scheme." 
+msgstr "" + +#: ../../reference/api/TSUrlHostGet.en.rst:61 +msgid "Return values" +msgstr "" + +#: ../../reference/api/TSUrlHostGet.en.rst:63 +msgid "" +"All APIs except :func:`TSUrlPortGet` returns a string, which is not " +"guaranteed to be NULL terminated. You must therefore always use the :data:" +"`length` value to determine the actual length of the returned string." +msgstr "" + +#: ../../reference/api/TSUrlHostGet.en.rst:67 +msgid "" +":func:`TSUrlPortGet` simply returns the port number as an integer, possibly " +"canonicalized with :data:`80` for HTTP and :data:`443` for HTTPS schemes. " +"If there is neither port nor scheme information available in the URL, :data:" +"`0` is returned." +msgstr "" + +#: ../../reference/api/TSUrlHostGet.en.rst:73 +msgid "See also" +msgstr "" + +#: ../../reference/api/TSUrlHostGet.en.rst:75 +msgid "" +":manpage:`TSAPI(3ts)`, :manpage:`TSUrlCreate(3ts)`, :manpage:" +"`TSUrlHostSet(3ts)`, :manpage:`TSUrlStringGet(3ts)`, :manpage:" +"`TSUrlPercentEncode(3ts)`" +msgstr "" diff --git a/doc/locale/ja/LC_MESSAGES/reference/api/TSUrlHostSet.en.po b/doc/locale/ja/LC_MESSAGES/reference/api/TSUrlHostSet.en.po new file mode 100644 index 00000000..a73dc17c --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/reference/api/TSUrlHostSet.en.po @@ -0,0 +1,92 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.2\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2014-01-15 22:31+0900\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../reference/api/TSUrlHostSet.en.rst:22 +msgid "TSUrlHostSet" +msgstr "" + +#: ../../reference/api/TSUrlHostSet.en.rst:25 +msgid "Synopsis" +msgstr "" + +#: ../../reference/api/TSUrlHostSet.en.rst:27 +msgid "`#include `" +msgstr "" + +#: ../../reference/api/TSUrlHostSet.en.rst:40 +msgid "Description" +msgstr "" + +#: ../../reference/api/TSUrlHostSet.en.rst:42 +msgid "" +"The URL data structure is a parsed version of a standard internet URL. The " +"Traffic Server URL API provides access to URL data stored in marshal " +"buffers. The URL functions can create, copy, retrieve or delete entire " +"URLs, and retrieve or modify parts of URLs, such as their host, port or " +"scheme information." +msgstr "" + +#: ../../reference/api/TSUrlHostSet.en.rst:48 +msgid "" +":func:`TSUrlSchemeSet`, :func:`TSUrlUserSet`, :func:`TSUrlPasswordSet`, :" +"func:`TSUrlHostSet`, :func:`TSUrlHttpParamsSet`, :func:`TSUrlHttpQuerySet` " +"and :func:`TSUrlHttpFragmentSet` each set the specified portion of the URL " +"located at offset within the marshal buffer :data:`bufp` to the string " +"value. If :data:`length` is :data:`-1` then these functions assume that " +"value is NULL-terminated. Otherwise, the length of the :data:`string` value " +"is taken to be the value of :data:`length`. These functions copy the string " +"to within :data:`bufp`, so it can be subsequently modified or deleted." 
+msgstr "" + +#: ../../reference/api/TSUrlHostSet.en.rst:57 +msgid "" +":func:`TSUrlPortSet` sets the port number portion of the URL located at :" +"data:`offset` within the marshal buffer :data:`bufp` to the value port. " +"Normal canonicalization based on the URL scheme still applies." +msgstr "" + +#: ../../reference/api/TSUrlHostSet.en.rst:62 +msgid "Return values" +msgstr "" + +#: ../../reference/api/TSUrlHostSet.en.rst:64 +msgid "" +"All these APIs returns a :type:`TSReturnCode`, indicating success (:data:" +"`TS_SUCCESS`) or failure (:data:`TS_ERROR`) of the operation." +msgstr "" + +#: ../../reference/api/TSUrlHostSet.en.rst:68 +msgid "See also" +msgstr "" + +#: ../../reference/api/TSUrlHostSet.en.rst:70 +msgid "" +":manpage:`TSAPI(3ts)`, :manpage:`TSUrlCreate(3ts)`, :manpage:" +"`TSUrlHostGet(3ts)`, :manpage:`TSUrlStringGet(3ts)`, :manpage:" +"`TSUrlPercentEncode(3ts)`" +msgstr "" diff --git a/doc/locale/ja/LC_MESSAGES/reference/api/TSUrlPercentEncode.en.po b/doc/locale/ja/LC_MESSAGES/reference/api/TSUrlPercentEncode.en.po new file mode 100644 index 00000000..a6e01c89 --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/reference/api/TSUrlPercentEncode.en.po @@ -0,0 +1,91 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.2\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2014-01-15 22:31+0900\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../reference/api/TSUrlPercentEncode.en.rst:22 +msgid "TSUrlPercentEncode" +msgstr "" + +#: ../../reference/api/TSUrlPercentEncode.en.rst:25 +msgid "Synopsis" +msgstr "" + +#: ../../reference/api/TSUrlPercentEncode.en.rst:27 +msgid "`#include `" +msgstr "" + +#: ../../reference/api/TSUrlPercentEncode.en.rst:34 +msgid "Description" +msgstr "" + +#: ../../reference/api/TSUrlPercentEncode.en.rst:36 +msgid "" +"The URL data structure is a parsed version of a standard internet URL. The " +"Traffic Server URL API provides access to URL data stored in marshal " +"buffers. The URL functions can create, copy, retrieve or delete entire " +"URLs, and retrieve or modify parts of URLs, such as their host, port or " +"scheme information." +msgstr "" + +#: ../../reference/api/TSUrlPercentEncode.en.rst:42 +msgid "" +":func:`TSUrlPercentEncode` performs percent-encoding of the URL object, " +"storing the new string in the :data:`dst` buffer. The :data:`length` " +"parameter will be set to the new (encoded) string length, or :data:`0` if " +"the encoding failed. :func:`TSStringPercentEncode` is similar but operates " +"on a string. If the optional :data:`map` parameter is provided (not :data:" +"`NULL`) , it should be a map of characters to encode." +msgstr "" + +#: ../../reference/api/TSUrlPercentEncode.en.rst:49 +msgid "" +":func:`TSStringPercentDecode` perform percent-decoding of the string in " +"the :data:`str` buffer, writing to the :data:`dst` buffer. The source and " +"destination can be the same, in which case they overwrite. The decoded " +"string is always guaranteed to be no longer than the source string." 
+msgstr "" + +#: ../../reference/api/TSUrlPercentEncode.en.rst:55 +msgid "Return values" +msgstr "" + +#: ../../reference/api/TSUrlPercentEncode.en.rst:57 +msgid "" +"All these APIs returns a :type:`TSReturnCode`, indicating success (:data:" +"`TS_SUCCESS`) or failure (:data:`TS_ERROR`) of the operation." +msgstr "" + +#: ../../reference/api/TSUrlPercentEncode.en.rst:61 +msgid "See also" +msgstr "" + +#: ../../reference/api/TSUrlPercentEncode.en.rst:63 +msgid "" +":manpage:`TSAPI(3ts)`, :manpage:`TSUrlCreate(3ts)`, :manpage:" +"`TSUrlHostGet(3ts)`, :manpage:`TSUrlHostSet(3ts)`, :manpage:" +"`TSUrlStringGet(3ts)`" +msgstr "" diff --git a/doc/locale/ja/LC_MESSAGES/reference/api/TSUrlStringGet.en.po b/doc/locale/ja/LC_MESSAGES/reference/api/TSUrlStringGet.en.po new file mode 100644 index 00000000..7b0619cb --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/reference/api/TSUrlStringGet.en.po @@ -0,0 +1,88 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.2\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2014-01-15 22:31+0900\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../reference/api/TSUrlStringGet.en.rst:22 +msgid "TSUrlStringGet" +msgstr "" + +#: ../../reference/api/TSUrlStringGet.en.rst:25 +msgid "Synopsis" +msgstr "" + +#: ../../reference/api/TSUrlStringGet.en.rst:27 +msgid "`#include `" +msgstr "" + +#: ../../reference/api/TSUrlStringGet.en.rst:35 +msgid "Description" +msgstr "" + +#: ../../reference/api/TSUrlStringGet.en.rst:37 +msgid "" +"The URL data structure is a parsed version of a standard internet URL. The " +"Traffic Server URL API provides access to URL data stored in marshal " +"buffers. The URL functions can create, copy, retrieve or delete entire " +"URLs, and retrieve or modify parts of URLs, such as their host, port or " +"scheme information." +msgstr "" + +#: ../../reference/api/TSUrlStringGet.en.rst:43 +msgid "" +":func:`TSUrlStringGet` constructs a string representation of the URL " +"located at :data:`offset` within the marshal buffer :data:`bufp`. :func:" +"`TSUrlStringGet` stores the length of the allocated string in the " +"parameter :data:`length`. This is the same length that :func:" +"`TSUrlLengthGet` returns. The returned string is allocated by a call to :" +"func:`TSmalloc` and must be freed by a call to :func:`TSfree`. If length " +"is :data:`NULL` then no attempt is made to de-reference it." +msgstr "" + +#: ../../reference/api/TSUrlStringGet.en.rst:51 +msgid "" +":func:`TSUrlLengthGet` calculates the length of the URL located at :data:" +"`offset` within the marshal buffer bufp as if it were returned as a string. " +"This length will be the same as the length returned by :func:" +"`TSUrlStringGet`." 
+msgstr "" + +#: ../../reference/api/TSUrlStringGet.en.rst:56 +msgid "" +":func:`TSUrlPrint` formats a URL stored in an :type:`TSMBuffer` to an :type:" +"`TSIOBuffer`." +msgstr "" + +#: ../../reference/api/TSUrlStringGet.en.rst:60 +msgid "See also" +msgstr "" + +#: ../../reference/api/TSUrlStringGet.en.rst:62 +msgid "" +":manpage:`TSAPI(3ts)`, :manpage:`TSmalloc(3ts)`, :manpage:" +"`TSUrlCreate(3ts)`, :manpage:`TSUrlHostGet(3ts)`, :manpage:" +"`TSUrlHostSet(3ts)`, :manpage:`TSUrlPercentEncode(3ts)`" +msgstr "" diff --git a/doc/locale/ja/LC_MESSAGES/reference/api/TSmalloc.en.po b/doc/locale/ja/LC_MESSAGES/reference/api/TSmalloc.en.po new file mode 100644 index 00000000..a443ffef --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/reference/api/TSmalloc.en.po @@ -0,0 +1,137 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-11-08 17:57+0900\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../reference/api/TSmalloc.en.rst:22 +msgid "TSmalloc" +msgstr "TSmalloc" + +#: ../../reference/api/TSmalloc.en.rst:25 +msgid "Synopsis" +msgstr "概要" + +#: ../../reference/api/TSmalloc.en.rst:27 +msgid "`#include `" +msgstr "`#include `" + +#: ../../reference/api/TSmalloc.en.rst:38 +msgid "Description" +msgstr "解説" + +#: ../../reference/api/TSmalloc.en.rst:40 +msgid "" +"Traffic Server provides a number of routines for allocating and freeing " +"memory. These routines correspond to similar routines in the C library. For " +"example, :func:`TSrealloc` behaves like the C library routine :func:" +"`realloc`. There are two reasons to use the routines provided by Traffic " +"Server. The first is portability. The Traffic Server API routines behave " +"the same on all of Traffic Servers supported platforms. For example, :func:" +"`realloc` does not accept an argument of :data:`NULL` on some platforms. " +"The second reason is that the Traffic Server routines actually track the " +"memory allocations by file and line number. This tracking is very " +"efficient, is always turned on, and is useful for tracking down memory " +"leaks." 
+msgstr "" +"Traffic Server はメモリの割り当てと解放のための多数のルーチンを提供します。" +"これらのルーチンは C ライブラリの似たものに対応します。例えば、:func:" +"`TSrealloc` は C ライブラリルーチンの :func:`realloc` のように振る舞います。" +"Traffic Server によって提供されるルーチンを使う理由は二つあります。一つ目は" +"移植性です。Traffic Server API ルーチンは Traffic Server が対応するすべての" +"プラットフォーム上で同様に振る舞います。例えば、:func:`realloc` はいくつかの" +"プラットフォームで :data:`NULL` の引数を受け付けません。二つ目の理由は " +"Traffic Server ルーチンが実はファイルと行番号でメモリの割り当てを追跡してい" +"ることです。この追跡はとても優れており、常に有効化され、メモリーリークを探し" +"出すのに便利です。" + +#: ../../reference/api/TSmalloc.en.rst:51 +msgid "" +":func:`TSmalloc` returns a pointer to size bytes of memory allocated from " +"the heap. Traffic Server uses :func:`TSmalloc` internally for memory " +"allocations. Always use :func:`TSfree` to release memory allocated by :func:" +"`TSmalloc`; do not use :func:`free`." +msgstr "" +":func:`TSmalloc` はヒープに割り当てられた size バイトのメモリを指すポイン" +"ターを返します。Traffic Server メモリ割り当てのために内部で :func:" +"`TSmalloc` を使用します。:func:`TSmalloc` で割り当てられたメモリーを解放する" +"には常に :func:`TSfree` を使用し、:func:`free` は使用しないでください。" + +#: ../../reference/api/TSmalloc.en.rst:67 +msgid "" +":func:`TSstrlcpy` copies up to size - 1 characters from the NUL-terminated " +"string src to dst, NUL-terminating the result." +msgstr "" +":func:`TSstrlcpy` は NUL 終端文字列 src から dst に size - 1 文字までコピー" +"し、結果を NUL 終端します。`" + +#: ../../reference/api/TSmalloc.en.rst:70 +msgid "" +":func:`TSstrlcat` appends the NUL-terminated string src to the end of dst. " +"It will append at most size - strlen(dst) - 1 bytes, NUL-terminating the " +"result." +msgstr "" +":func:`TSstrlcat` は NUL 終端文字列 src を dst の終わりに追加します。これは" +"最大で size - strlen(dst) - 1 を追加し、結果を NUL 終端します。" + +#: ../../reference/api/TSmalloc.en.rst:74 +msgid "" +":func:`TSfree` releases the memory allocated by :func:`TSmalloc` or :func:" +"`TSrealloc`. If ptr is :data:`NULL`, :func:`TSfree` does no operation." 
+msgstr "" +":func:`TSfree` は :func:`TSmalloc` または :func:`TSrealloc` によって割り当て" +"られたメモリーを解放します。ptr が :data:`NULL` の場合、:func:`TSfree` は何" +"も行いません。" + +#: ../../reference/api/TSmalloc.en.rst:78 +msgid "See also" +msgstr "参考" + +#: ../../reference/api/TSmalloc.en.rst:79 +msgid ":manpage:`TSAPI(3ts)`" +msgstr ":manpage:`TSAPI(3ts)`" + +#: ../../reference/api/TSmalloc.en.rst:56 +msgid "" +":func:`TSstrdup` returns a pointer to a new string that is a duplicate of " +"the string pointed to by str. The memory for the new string is allocated " +"using :func:`TSmalloc` and should be freed by a call to :func:`TSfree`. :" +"func:`TSstrndup` returns a pointer to a new string that is a duplicate of " +"the string pointed to by str and size bytes long. The new string will be " +"NUL-terminated. This API is very useful for transforming non NUL-terminated " +"string values returned by APIs such as :func:`TSMimeHdrFieldValueStringGet` " +"into NUL-terminated string values. The memory for the new string is " +"allocated using :func:`TSmalloc` and should be freed by a call to :func:" +"`TSfree`." +msgstr "" +":func:`TSstrdup` は str で指し示された文字列を複製した新しい文字列を指す" +"ポインターを返します。新しい文字列のメモリーは :func:`TSmalloc` を使用し" +"て割り当てられ、:func:`TSfree` の呼び出しに酔って解放されるべきです。:" +"func:`TSstrndup` は size バイトの長さの str で指し示された文字列を複製し" +"た新しい文字列を指すポインターを返します。新しい文字列は NUL 終端されま" +"す。この API は :func:`TSMimeHdrFieldValueStringGet` などによって返された" +"非 NUL 終端文字列値を NUL 終端文字列値に変換するのにとても便利です。新し" +"い文字列のメモリーは :func:`TSmalloc` を使用して割り当てられ、:func:" +"`TSfree` の呼び出しに酔って解放されるべきです。" diff --git a/doc/locale/ja/LC_MESSAGES/reference/api/index.en.po b/doc/locale/ja/LC_MESSAGES/reference/api/index.en.po new file mode 100644 index 00000000..0267427c --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/reference/api/index.en.po @@ -0,0 +1,31 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-11-08 17:57+0900\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../reference/api/index.en.rst:2 +msgid "API Reference" +msgstr "API リファレンス" diff --git a/doc/locale/ja/LC_MESSAGES/reference/commands/index.en.po b/doc/locale/ja/LC_MESSAGES/reference/commands/index.en.po new file mode 100644 index 00000000..599e91f3 --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/reference/commands/index.en.po @@ -0,0 +1,31 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-11-08 17:57+0900\n" +"PO-Revision-Date: 2014-01-11 20:10+0900\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../reference/commands/index.en.rst:19 +msgid "Command Reference" +msgstr "コマンドリファレンス" diff --git a/doc/locale/ja/LC_MESSAGES/reference/commands/traffic_cop.en.po b/doc/locale/ja/LC_MESSAGES/reference/commands/traffic_cop.en.po new file mode 100644 index 00000000..5ce5deca --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/reference/commands/traffic_cop.en.po @@ -0,0 +1,84 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-11-08 17:57+0900\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../reference/commands/traffic_cop.en.rst:20 +msgid "traffic_cop" +msgstr "" + +#: ../../reference/commands/traffic_cop.en.rst:25 +msgid "Description" +msgstr "" + +#: ../../reference/commands/traffic_cop.en.rst:27 +msgid "" +":program:`traffic_cop` is a watchdog program that is responsible for " +"starting :program:`traffic_manager` and :program:`traffic_server` and " +"monitoring them for responsiveness. If either of these processes are " +"determined to be unresponsive, :program:`traffic_cop` will kill and restart " +"them." +msgstr "" + +#: ../../reference/commands/traffic_cop.en.rst:33 +msgid "" +"On Linux, :program:`traffic_cop` will also monitor available memory and " +"swap space, restarting the watched processes if the available memory falls " +"below a minimum threshold. The memory thresholds can be configured with " +"the :ts:cv:`proxy.config.cop.linux_min_swapfree_kb` and :ts:cv:`proxy." +"config.cop.linux_min_memfree_kb` variables." +msgstr "" + +#: ../../reference/commands/traffic_cop.en.rst:39 +msgid "The following options are available:" +msgstr "" + +#: ../../reference/commands/traffic_cop.en.rst:43 +msgid "" +"Kill children using ``SIGSTOP`` instead of ``SIGKILL``. This option is " +"primarily for debugging." +msgstr "" + +#: ../../reference/commands/traffic_cop.en.rst:48 +msgid "Print version information and exit." +msgstr "" + +#: ../../reference/commands/traffic_cop.en.rst:50 +msgid "" +":program:`traffic_cop` ordinarily logs to syslog, however for debugging " +"purposes, the ``--enable-cop-debug`` build option causes it to log trace " +"messages to :file:`/tmp/traffic_cop.trace`." 
+msgstr "" + +#: ../../reference/commands/traffic_cop.en.rst:56 +msgid "See also" +msgstr "" + +#: ../../reference/commands/traffic_cop.en.rst:58 +msgid "" +":manpage:`syslog(1)`, :manpage:`traffic_manager(8)`, :manpage:" +"`traffic_server(8)`" +msgstr "" diff --git a/doc/locale/ja/LC_MESSAGES/reference/commands/traffic_line.en.po b/doc/locale/ja/LC_MESSAGES/reference/commands/traffic_line.en.po new file mode 100644 index 00000000..9cbdd683 --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/reference/commands/traffic_line.en.po @@ -0,0 +1,217 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-11-08 17:57+0900\n" +"PO-Revision-Date: 2014-01-15 23:12+0900\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../reference/commands/traffic_line.en.rst:20 +msgid "traffic_line" +msgstr "traffic_line" + +#: ../../reference/commands/traffic_line.en.rst:23 +msgid "Synopsis" +msgstr "概要" + +#: ../../reference/commands/traffic_line.en.rst:26 +msgid ":program:`traffic_line` [options]" +msgstr ":program:`traffic_line` [options]" + +#: ../../reference/commands/traffic_line.en.rst:31 +msgid "Description" +msgstr "解説" + +#: ../../reference/commands/traffic_line.en.rst:33 +msgid "" +":program:`traffic_line` is used to execute individual Traffic Server " +"commands and to script multiple commands in a shell." +msgstr "" +":program:`traffic_line` はシェルの中で単一の Traffic Server コマンドを実行し" +"たり、複数のコマンドを書くために使われます。" + +#: ../../reference/commands/traffic_line.en.rst:37 +msgid "Options" +msgstr "オプション" + +#: ../../reference/commands/traffic_line.en.rst:43 +msgid "" +"Bounce all Traffic Server nodes in the cluster. Bouncing Traffic Server " +"shuts down and immediately restarts Traffic Server, node-by-node." +msgstr "" +"クラスター内の全ての Traffic Server ノードを再起動します。ノードからノード" +"へ Traffic Server のバウンシングはシャットダウンし、すぐに Traffic Server を" +"再起動します。" + +#: ../../reference/commands/traffic_line.en.rst:49 +msgid "" +"Bounce Traffic Server on the local node. Bouncing Traffic Server shuts down " +"and immediately restarts the Traffic Server node." +msgstr "" +"ローカルノードで Traffic Server を再起動させます。Traffic Server のバウンス" +"はシャットダウンし、すぐに Traffic Server ノードを再起動します。" + +#: ../../reference/commands/traffic_line.en.rst:54 +msgid "Clears accumulated statistics on all nodes in the cluster." 
+msgstr "クラスター内の全てのノードに蓄積された統計をクリアします。" + +#: ../../reference/commands/traffic_line.en.rst:58 +msgid "Clears accumulated statistics on the local node." +msgstr "ローカルノードに蓄積された統計をクリアします。" + +#: ../../reference/commands/traffic_line.en.rst:62 +msgid "Print usage information and exit." +msgstr "使い方の情報を表示し、終了します。" + +#: ../../reference/commands/traffic_line.en.rst:66 +msgid "" +"Restart the :program:`traffic_manager` and :program:`traffic_server` " +"processes on the local node." +msgstr "" +"ローカルノードの :program:`traffic_manager` と :program:`traffic_server` プ" +"ロセスを再起動します。" + +#: ../../reference/commands/traffic_line.en.rst:71 +msgid "" +"Restart the :program:`traffic_manager` process and the :program:" +"`traffic_server` process on all the nodes in a cluster." +msgstr "" +"クラスター内の全てのノードの :program:`traffic_manager` プロセスと :program:" +"`traffic_server` プロセスを再起動します。" + +#: ../../reference/commands/traffic_line.en.rst:76 +msgid "" +"Display specific performance statistics or a current configuration setting." +msgstr "特定のパフォーマンス統計や現在の設定を表示します。" + +#: ../../reference/commands/traffic_line.en.rst:81 +msgid "" +"Set the configuration variable named `VAR`. The value of the configuration " +"variable is given by the :option:`traffic_line -v` option. Refer to the :" +"file:`records.config` documentation for a list of the configuration " +"variables you can specify." +msgstr "" +"`VAR` という名前の設定変数をセットします。設定変数の値は :option:" +"`traffic_line -v` オプションで与えられます。記述できる設定変数のリストについ" +"ては :file:`records.config` のドキュメントを参照してください。" + +#: ../../reference/commands/traffic_line.en.rst:88 +msgid "Shut down Traffic Server on the local node." +msgstr "ローカルノードの Traffic Server をシャットダウンします。" + +#: ../../reference/commands/traffic_line.en.rst:92 +msgid "Start Traffic Server on the local node." +msgstr "ローカルノードの Traffic Server を起動します。" + +#: ../../reference/commands/traffic_line.en.rst:96 +msgid "Specify the value to set when setting a configuration variable." 
+msgstr "設定変数を設定する際の値を記述します。" + +#: ../../reference/commands/traffic_line.en.rst:100 +msgid "Print version information and exit." +msgstr "バージョン情報を表示して、終了します。" + +#: ../../reference/commands/traffic_line.en.rst:104 +msgid "" +"Initiate a Traffic Server configuration file reread. Use this command to " +"update the running configuration after any configuration file modification." +msgstr "" +"Traffic Server 設定ファイルの再読み込みを始めます。設定ファイルを変更した後" +"で、実行する設定を更新するのにこのコマンドを使用してください。" + +#: ../../reference/commands/traffic_line.en.rst:110 +msgid "Reset performance statistics to zero across the cluster." +msgstr "クラスター内のパフォーマンス統計をゼロにリセットします。" + +#: ../../reference/commands/traffic_line.en.rst:114 +msgid "Reset performance statistics to zero on the local node." +msgstr "ローカルノードのパフォーマンス統計をゼロにリセットします。" + +#: ../../reference/commands/traffic_line.en.rst:141 +msgid "Performance Statistics" +msgstr "パフォーマンス統計" + +#: ../../reference/commands/traffic_line.en.rst:145 +msgid "" +"The :option:`traffic_line -r` option accepts the following variable names::" +msgstr ":option:`traffic_line -r` オプションは次の変数名を受け入れます。" + +#: ../../reference/commands/traffic_line.en.rst:797 +msgid "Examples" +msgstr "例" + +#: ../../reference/commands/traffic_line.en.rst:799 +msgid "Configure Traffic Server to log in Squid format::" +msgstr "Squid フォーマットでログを取るように Traffic Server を設定するには" + +#: ../../reference/commands/traffic_line.en.rst:806 +msgid "Files" +msgstr "ファイル" + +#: ../../reference/commands/traffic_line.en.rst:808 +msgid ":file:`records.config`, :file:`ssl_multicert.config`" +msgstr ":file:`records.config`、 :file:`ssl_multicert.config`" + +#: ../../reference/commands/traffic_line.en.rst:811 +msgid "See also" +msgstr "参照" + +#: ../../reference/commands/traffic_line.en.rst:813 +msgid ":manpage:`records.config(5)`" +msgstr ":manpage:`records.config(5)`" + +#: ../../reference/commands/traffic_line.en.rst:118 +msgid "" +"Mark a cache storage device as offline. 
The storage is identified by a " +"*path* which must match exactly a path specified in :file:`storage.config`. " +"This removes the storage from the cache and redirects requests that would " +"have used this storage to other storage. This has exactly the same effect " +"as a disk failure for that storage. This does not persist across restarts " +"of the :program:`traffic_server` process." +msgstr "" +"キャッシュストレージデバイスをオフラインとしてマークします。ストレージは " +":file:`storage.config` 内で指定されているパスと完全に一致している \\ *パス*\\ " +"によって特定されます。これはストレージをキャッシュから取り除き、このストレージ" +"ではなく他のストレージを使うようにリクエストをリダイレクトします。これはその" +"ストレージが故障したときとまったく同じ効果を持っています。これは " +":program:`traffic_server` プロセスのリスタートをまたいで持続しません。" + +#: ../../reference/commands/traffic_line.en.rst:125 +msgid "List all alarm events that have not been acknowledged (cleared)." +msgstr "承認 ( 消去 ) されていないすべてのアラームイベントを一覧表示します。" + +#: ../../reference/commands/traffic_line.en.rst:129 +msgid "" +"Clear (acknowledge) an alarm event. The arguments are \"all\" for all " +"current alarms, a specific alarm number (e.g. ''1''), or an alarm string " +"identifier (e.g. ''MGMT_ALARM_PROXY_CONFIG_ERROR'')." +msgstr "" +"アラームイベントを消去 ( 承認 ) します。引数は \"all\" で現在のすべての" +"アラーム、特定のアラーム番号 ( 例 ``1``) 、もしくはアラーム文字列識別子 " +"( 例 ''MGMT_ALARM_PROXY_CONFIG_ERROR'') です。" + +#: ../../reference/commands/traffic_line.en.rst:135 +msgid "" +"Show the current proxy server status, indicating if we're running or not." +msgstr "" +"プロキシーサーバーが実行中かどうかを示している現在の状態を表示します。" diff --git a/doc/locale/ja/LC_MESSAGES/reference/commands/traffic_logcat.en.po b/doc/locale/ja/LC_MESSAGES/reference/commands/traffic_logcat.en.po new file mode 100644 index 00000000..4f29249a --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/reference/commands/traffic_logcat.en.po @@ -0,0 +1,131 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-11-08 17:57+0900\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../reference/commands/traffic_logcat.en.rst:20 +msgid "traffic_logcat" +msgstr "" + +#: ../../reference/commands/traffic_logcat.en.rst:23 +msgid "Synopsis" +msgstr "" + +#: ../../reference/commands/traffic_logcat.en.rst:25 +msgid "" +":program:`traffic_logcat` [-o output-file | -a] [-CEhSVw2] [input-file ...]" +msgstr "" + +#: ../../reference/commands/traffic_logcat.en.rst:31 +msgid "Description" +msgstr "" + +#: ../../reference/commands/traffic_logcat.en.rst:33 +msgid "" +"To analyse a binary log file using standard tools, you must first convert " +"it to ASCII. :program:`traffic_logcat` does exactly that." +msgstr "" + +#: ../../reference/commands/traffic_logcat.en.rst:37 +msgid "Options" +msgstr "" + +#: ../../reference/commands/traffic_logcat.en.rst:41 +msgid "Specifies where the command output is directed." +msgstr "" + +#: ../../reference/commands/traffic_logcat.en.rst:45 +msgid "" +"Automatically generates the output filename based on the input filename. If " +"the input is from stdin, then this option is ignored. 
For example::" +msgstr "" + +#: ../../reference/commands/traffic_logcat.en.rst:51 +msgid "generates::" +msgstr "" + +#: ../../reference/commands/traffic_logcat.en.rst:57 +msgid "Follows the file, like :manpage:`tail(1)` ``-f``" +msgstr "" + +#: ../../reference/commands/traffic_logcat.en.rst:61 +msgid "" +"Attempts to transform the input to Netscape Common format, if possible." +msgstr "" + +#: ../../reference/commands/traffic_logcat.en.rst:65 +msgid "" +"Attempts to transform the input to Netscape Extended format, if possible." +msgstr "" + +#: ../../reference/commands/traffic_logcat.en.rst:69 +msgid "Attempts to transform the input to Squid format, if possible." +msgstr "" + +#: ../../reference/commands/traffic_logcat.en.rst:73 +msgid "" +"Attempt to transform the input to Netscape Extended-2 format, if possible." +msgstr "" + +#: ../../reference/commands/traffic_logcat.en.rst:81 +msgid "Print usage information and exit." +msgstr "" + +#: ../../reference/commands/traffic_logcat.en.rst:85 +msgid "Print version information and exit." +msgstr "" + +#: ../../reference/commands/traffic_logcat.en.rst:88 +msgid "" +"Use only one of the following options at any given time: ``-S``, ``-C``, ``-" +"E``, or ``-2``." +msgstr "" + +#: ../../reference/commands/traffic_logcat.en.rst:90 +msgid "" +"If no input files are specified, then :program:`traffic_logcat` reads from " +"the standard input (``stdin``). If you do not specify an output file, then :" +"program:`traffic_logcat` writes to the standard output (``stdout``)." +msgstr "" + +#: ../../reference/commands/traffic_logcat.en.rst:94 +msgid "" +"For example, to convert a binary log file to an ASCII file, you can use " +"the :program:`traffic_logcat` command with either of the following options " +"below::" +msgstr "" + +#: ../../reference/commands/traffic_logcat.en.rst:101 +msgid "The binary log file is not modified by this command." 
+msgstr "" + +#: ../../reference/commands/traffic_logcat.en.rst:104 +msgid "See Also" +msgstr "" + +#: ../../reference/commands/traffic_logcat.en.rst:106 +msgid ":manpage:`tail(1)`" +msgstr "" diff --git a/doc/locale/ja/LC_MESSAGES/reference/commands/traffic_logstats.en.po b/doc/locale/ja/LC_MESSAGES/reference/commands/traffic_logstats.en.po new file mode 100644 index 00000000..221886c3 --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/reference/commands/traffic_logstats.en.po @@ -0,0 +1,43 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-11-08 17:57+0900\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../reference/commands/traffic_logstats.en.rst:20 +msgid "traffic_logstats" +msgstr "" + +#: ../../reference/commands/traffic_logstats.en.rst:25 +msgid "Description" +msgstr "" + +#: ../../reference/commands/traffic_logstats.en.rst:63 +msgid "Print usage information and exit." 
+msgstr "" + +#: ../../reference/commands/traffic_logstats.en.rst:67 +msgid "Print version information and exit." +msgstr "" diff --git a/doc/locale/ja/LC_MESSAGES/reference/commands/traffic_manager.en.po b/doc/locale/ja/LC_MESSAGES/reference/commands/traffic_manager.en.po new file mode 100644 index 00000000..12699fc5 --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/reference/commands/traffic_manager.en.po @@ -0,0 +1,39 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-11-08 17:57+0900\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../reference/commands/traffic_manager.en.rst:20 +msgid "traffic_manager" +msgstr "" + +#: ../../reference/commands/traffic_manager.en.rst:25 +msgid "Description" +msgstr "" + +#: ../../reference/commands/traffic_manager.en.rst:43 +msgid "Environment" +msgstr "" diff --git a/doc/locale/ja/LC_MESSAGES/reference/commands/traffic_server.en.po b/doc/locale/ja/LC_MESSAGES/reference/commands/traffic_server.en.po new file mode 100644 index 00000000..104bfb11 --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/reference/commands/traffic_server.en.po @@ -0,0 +1,47 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-11-08 17:57+0900\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../reference/commands/traffic_server.en.rst:20 +msgid "traffic_server" +msgstr "" + +#: ../../reference/commands/traffic_server.en.rst:25 +msgid "Description" +msgstr "" + +#: ../../reference/commands/traffic_server.en.rst:69 +msgid "Print usage information and exit." +msgstr "" + +#: ../../reference/commands/traffic_server.en.rst:73 +msgid "Print version information and exit." +msgstr "" + +#: ../../reference/commands/traffic_server.en.rst:76 +msgid "Environment" +msgstr "" diff --git a/doc/locale/ja/LC_MESSAGES/reference/commands/traffic_shell.en.po b/doc/locale/ja/LC_MESSAGES/reference/commands/traffic_shell.en.po new file mode 100644 index 00000000..00bc50bc --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/reference/commands/traffic_shell.en.po @@ -0,0 +1,39 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-11-08 17:57+0900\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../reference/commands/traffic_shell.en.rst:20 +msgid "traffic_shell" +msgstr "" + +#: ../../reference/commands/traffic_shell.en.rst:25 +msgid "Description" +msgstr "" + +#: ../../reference/commands/traffic_shell.en.rst:29 +msgid "Print version information and exit." +msgstr "" diff --git a/doc/locale/ja/LC_MESSAGES/reference/commands/traffic_top.en.po b/doc/locale/ja/LC_MESSAGES/reference/commands/traffic_top.en.po new file mode 100644 index 00000000..056a998f --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/reference/commands/traffic_top.en.po @@ -0,0 +1,35 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.2\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2014-01-15 22:31+0900\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../reference/commands/traffic_top.en.rst:20 +msgid "traffic_top" +msgstr "" + +#: ../../reference/commands/traffic_top.en.rst:25 +msgid "Description" +msgstr "" diff --git a/doc/locale/ja/LC_MESSAGES/reference/commands/tspush.en.po b/doc/locale/ja/LC_MESSAGES/reference/commands/tspush.en.po new file mode 100644 index 00000000..9836c86f --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/reference/commands/tspush.en.po @@ -0,0 +1,39 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-11-08 17:57+0900\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../reference/commands/tspush.en.rst:20 +msgid "tspush" +msgstr "" + +#: ../../reference/commands/tspush.en.rst:25 +msgid "Description" +msgstr "" + +#: ../../reference/commands/tspush.en.rst:35 +msgid "Print usage information and exit." +msgstr "" diff --git a/doc/locale/ja/LC_MESSAGES/reference/commands/tstop.en.po b/doc/locale/ja/LC_MESSAGES/reference/commands/tstop.en.po new file mode 100644 index 00000000..63a07ad5 --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/reference/commands/tstop.en.po @@ -0,0 +1,35 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-11-08 17:57+0900\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../reference/commands/tstop.en.rst:20 +msgid "tstop" +msgstr "" + +#: ../../reference/commands/tstop.en.rst:25 +msgid "Description" +msgstr "" diff --git a/doc/locale/ja/LC_MESSAGES/reference/commands/tsxs.en.po b/doc/locale/ja/LC_MESSAGES/reference/commands/tsxs.en.po new file mode 100644 index 00000000..d2a5449c --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/reference/commands/tsxs.en.po @@ -0,0 +1,39 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-11-08 17:57+0900\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../reference/commands/tsxs.en.rst:20 +msgid "tsxs" +msgstr "" + +#: ../../reference/commands/tsxs.en.rst:25 +msgid "Description" +msgstr "" + +#: ../../reference/commands/tsxs.en.rst:47 +msgid "Print usage information and exit." +msgstr "" diff --git a/doc/locale/ja/LC_MESSAGES/reference/configuration/cache.config.en.po b/doc/locale/ja/LC_MESSAGES/reference/configuration/cache.config.en.po new file mode 100644 index 00000000..d9c6e74b --- /dev/null +++ b/doc/locale/ja/LC_MESSAGES/reference/configuration/cache.config.en.po @@ -0,0 +1,309 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +msgid "" +msgstr "" +"Project-Id-Version: Apache Traffic Server 4.1\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-11-08 17:57+0900\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: ../../reference/configuration/cache.config.en.rst:22 +msgid "cache.config" +msgstr "" + +#: ../../reference/configuration/cache.config.en.rst:24 +msgid "" +"The :file:`cache.config` file defines how Traffic Server caches web " +"objects. You can add caching rules to specify the following: - Not to cache " +"objects from specific IP addresses - How long to pin particular objects in " +"the cache - How long to consider cached objects as fresh - Whether to " +"ignore no-cache directives from the server" +msgstr "" + +#: ../../reference/configuration/cache.config.en.rst:32 +msgid "" +"After you modify the :file:`cache.config` file, navigate to the Traffic " +"Server bin directory; then run the :option:`traffic_line -x` command to " +"apply changes. When you apply the changes to a node in a cluster, Traffic " +"Server automatically applies the changes to all other nodes in the cluster." +msgstr "" + +#: ../../reference/configuration/cache.config.en.rst:39 +msgid "Format" +msgstr "" + +#: ../../reference/configuration/cache.config.en.rst:41 +msgid "" +"Each line in the :file:`cache.config` file contains a caching rule. Traffic " +"Server recognizes three space-delimited tags::" +msgstr "" + +#: ../../reference/configuration/cache.config.en.rst:46 +msgid "" +"You can use more than one secondary specifier in a rule. However, you " +"cannot repeat a secondary specifier. The following list shows the possible " +"primary destinations with allowed values." 
+msgstr "" + +#: ../../reference/configuration/cache.config.en.rst:54 +msgid "``dest_domain``" +msgstr "" + +#: ../../reference/configuration/cache.config.en.rst:53 +msgid "" +"A requested domain name. Traffic Server matches the domain name of the " +"destination from the URL in the request." +msgstr "" + +#: ../../reference/configuration/cache.config.en.rst:60 +msgid "``dest_host``" +msgstr "" + +#: ../../reference/configuration/cache.config.en.rst:59 +msgid "" +"A requested hostname. Traffic Server matches the hostname of the " +"destination from the URL in the request." +msgstr "" + +#: ../../reference/configuration/cache.config.en.rst:66 +msgid "``dest_ip``" +msgstr "" + +#: ../../reference/configuration/cache.config.en.rst:65 +msgid "" +"A requested IP address. Traffic Server matches the IP address of the " +"destination in the request." +msgstr "" + +#: ../../reference/configuration/cache.config.en.rst:71 +msgid "``url_regex``" +msgstr "" + +#: ../../reference/configuration/cache.config.en.rst:71 +msgid "A regular expression (regex) to be found in a URL." +msgstr "" + +#: ../../reference/configuration/cache.config.en.rst:73 +msgid "" +"The secondary specifiers are optional in the :file:`cache.config` file. The " +"following list shows possible secondary specifiers with allowed values." +msgstr "" + +#: ../../reference/configuration/cache.config.en.rst:79 +msgid "``port``" +msgstr "" + +#: ../../reference/configuration/cache.config.en.rst:79 +msgid "A requested URL port." +msgstr "" + +#: ../../reference/configuration/cache.config.en.rst:84 +msgid "``scheme``" +msgstr "" + +#: ../../reference/configuration/cache.config.en.rst:84 +msgid "A request URL protocol: http or https." +msgstr "" + +#: ../../reference/configuration/cache.config.en.rst:89 +msgid "``prefix``" +msgstr "" + +#: ../../reference/configuration/cache.config.en.rst:89 +msgid "A prefix in the path part of a URL." 
+msgstr "" + +#: ../../reference/configuration/cache.config.en.rst:94 +msgid "``suffix``" +msgstr "" + +#: ../../reference/configuration/cache.config.en.rst:94 +msgid "A file suffix in the URL." +msgstr "" + +#: ../../reference/configuration/cache.config.en.rst:99 +msgid "``method``" +msgstr "" + +#: ../../reference/configuration/cache.config.en.rst:99 +msgid "A request URL method: get, put, post, trace." +msgstr "" + +#: ../../reference/configuration/cache.config.en.rst:104 +msgid "``time``" +msgstr "" + +#: ../../reference/configuration/cache.config.en.rst:104 +msgid "A time range, such as 08:00-14:00." +msgstr "" + +#: ../../reference/configuration/cache.config.en.rst:109 +msgid "``src_ip``" +msgstr "" + +#: ../../reference/configuration/cache.config.en.rst:109 +msgid "A client IP address." +msgstr "" + +#: ../../reference/configuration/cache.config.en.rst:111 +msgid "The following list shows possible actions and their allowed values." +msgstr "" + +#: ../../reference/configuration/cache.config.en.rst:128 +msgid "``action``" +msgstr "" + +#: ../../reference/configuration/cache.config.en.rst:117 +msgid "One of the following values:" +msgstr "" + +#: ../../reference/configuration/cache.config.en.rst:119 +msgid "" +"``never-cache`` configures Traffic Server to never cache specified objects." +msgstr "" + +#: ../../reference/configuration/cache.config.en.rst:121 +msgid "" +"``ignore-no-cache`` configures Traffic Server to ignore all ``Cache-" +"Control: no-cache`` headers." +msgstr "" + +#: ../../reference/configuration/cache.config.en.rst:123 +msgid "" +"``ignore-client-no-cache`` configures Traffic Server to ignore ``Cache-" +"Control: no-cache`` headers from client requests." +msgstr "" + +#: ../../reference/configuration/cache.config.en.rst:125 +msgid "" +"``ignore-server-no-cache`` configures Traffic Server to ignore ``Cache-" +"Control: no-cache`` headers from origin server responses." 
+msgstr "" + +#: ../../reference/configuration/cache.config.en.rst:127 +msgid "" +"``cluster-cache-local`` configures the cluster cache to allow for this " +"content to be stored locally on every cluster node." +msgstr "" + +#: ../../reference/configuration/cache.config.en.rst:158 +msgid "``pin-in-cache``" +msgstr "" + +#: ../../reference/configuration/cache.config.en.rst:154 +msgid "``d`` for days; for example: 2d" +msgstr "" + +#: ../../reference/configuration/cache.config.en.rst:155 +msgid "``h`` for hours; for example: 10h" +msgstr "" + +#: ../../reference/configuration/cache.config.en.rst:156 +msgid "``m`` for minutes; for example: 5m" +msgstr "" + +#: ../../reference/configuration/cache.config.en.rst:157 +msgid "``s`` for seconds; for example: 20s" +msgstr "" + +#: ../../reference/configuration/cache.config.en.rst:158 +msgid "mixed units; for example: 1h15m20s" +msgstr "" + +#: ../../reference/configuration/cache.config.en.rst:164 +msgid "``revalidate``" +msgstr "" + +#: ../../reference/configuration/cache.config.en.rst:172 +msgid "``ttl-in-cache``" +msgstr "" + +#: ../../reference/configuration/cache.config.en.rst:175 +msgid "Examples" +msgstr "" + +#: ../../reference/configuration/cache.config.en.rst:177 +msgid "" +"The following example configures Traffic Server to revalidate ``gif`` and " +"``jpeg`` objects in the domain ``mydomain.com`` every 6 hours, and all " +"other objects in ``mydomain.com`` every hour. The rules are applied in the " +"order listed. ::" +msgstr "" + +#: ../../reference/configuration/cache.config.en.rst:138 +msgid "``cache-responses-to-cookies``" +msgstr "" + +#: ../../reference/configuration/cache.config.en.rst:133 +msgid "" +"Change the style of caching with regard to cookies. This effectively " +"overrides the configuration parameter :ref:`proxy.config.http.cache." +"cache_responses_to_cookies` and uses the same values with the same " +"semantics. The override happens only for requests that match." 
+msgstr "" + +#: ../../reference/configuration/cache.config.en.rst:143 +msgid "" +"Preserves objects in cache, preventing them from being overwritten. Does " +"not affect objects that are determined not to be cacheable. This setting " +"can have performance issues, and severely affect the cache. For instance, " +"if the primary destination matches all objects, once the cache is full, no " +"new objects could get written as nothing would be evicted. Similarly, for " +"each cache-miss, each object would incur extra checks to determine if the " +"object it would replace could be overwritten." +msgstr "" + +#: ../../reference/configuration/cache.config.en.rst:151 +msgid "" +"The value is the amount of time you want to keep the object(s) in the " +"cache. The following time formats are allowed:" +msgstr "" + +#: ../../reference/configuration/cache.config.en.rst:163 +msgid "" +"For objects that are in cache, overrides the the amount of time the " +"object(s) are to be considered fresh. Use the same time formats as ``pin-in-" +"cache``." +msgstr "" + +#: ../../reference/configuration/cache.config.en.rst:169 +msgid "" +"Forces object(s) to become cached, as if they had a Cache-Control: max-age:" +"